Dataset schema: text (string, lengths 7 to 1.24M), id (string, lengths 14 to 166), metadata (dict), __index_level_0__ (int64, 0 to 519).
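Each record below pairs a raw source file (text) with its repository path (id), a metadata dict (file_path, repo_id, token_count), and a row index. A minimal, hedged sketch of loading and inspecting such a dataset with the datasets library follows; the dataset identifier used here is a placeholder, not the real repository name.

# Hedged sketch: load a dataset with the schema above and inspect one record.
# "your-org/transformers-test-files" is a placeholder id (assumption); substitute
# the actual dataset repository before running.
from datasets import load_dataset

ds = load_dataset("your-org/transformers-test-files", split="train")

row = ds[0]
print(row["id"])                       # e.g. "transformers/tests/models/.../test_modeling_*.py/0"
print(row["metadata"]["file_path"])    # path of the file inside the source repo
print(row["metadata"]["token_count"])  # pre-computed token count for the file
print(len(row["text"]))                # the file contents stored as one string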
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class TFBlenderbotSmallModelTester: config_cls = BlenderbotSmallConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size) eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1) input_ids = tf.concat([input_ids, eos_tensor], axis=1) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFBlenderbotSmallModel(config=config).get_decoder() input_ids = inputs_dict["input_ids"] input_ids = 
input_ids[:1, :] attention_mask = inputs_dict["attention_mask"][:1, :] head_mask = inputs_dict["head_mask"] self.batch_size = 1 # first forward pass outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def prepare_blenderbot_small_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8) if decoder_attention_mask is None: decoder_attention_mask = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8), ], axis=-1, ) if head_mask is None: head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () pipeline_model_mapping = ( { "feature-extraction": TFBlenderbotSmallModel, "summarization": TFBlenderbotSmallForConditionalGeneration, "text2text-generation": TFBlenderbotSmallForConditionalGeneration, "translation": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) is_encoder_decoder = True test_pruning = False test_onnx = False def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return pipeline_test_casse_name == "TextGenerationPipelineTests" def setUp(self): self.model_tester = TFBlenderbotSmallModelTester(self) self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig) def test_config(self): self.config_tester.run_common_tests() def 
test_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs) @require_tokenizers @require_tf class TFBlenderbot90MIntegrationTests(unittest.TestCase): src_text = [ "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like " " i'm going to throw up.\nand why is that?" ] model_name = "facebook/blenderbot_small-90M" @cached_property def tokenizer(self): # use "old" tokenizer here because of bug when downloading new tokenizer return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M") @cached_property def model(self): model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name) return model @slow def test_90_generation_from_long_input(self): model_inputs = self.tokenizer(self.src_text, return_tensors="tf") generated_ids = self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, ) generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
transformers/tests/models/blenderbot_small/test_modeling_tf_blenderbot_small.py/0
{ "file_path": "transformers/tests/models/blenderbot_small/test_modeling_tf_blenderbot_small.py", "repo_id": "transformers", "token_count": 4235 }
446
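The central pattern in the tester above (check_decoder_model_past_large_inputs) is a cache-consistency check: run the decoder once with use_cache=True, then feed only the new tokens together with past_key_values and verify the result matches a full pass over the concatenated ids. Below is a condensed, hedged sketch of that pattern using the same tiny configuration values as the tester; it is not the exact test code.

# Condensed sketch of the with/without-cache equivalence check used above.
# Tiny random weights; config values mirror the tester defaults.
import tensorflow as tf
from transformers import BlenderbotSmallConfig, TFBlenderbotSmallModel

config = BlenderbotSmallConfig(
    vocab_size=99, d_model=32, encoder_layers=2, decoder_layers=2,
    encoder_attention_heads=4, decoder_attention_heads=4,
    encoder_ffn_dim=37, decoder_ffn_dim=37, max_position_embeddings=50,
)
decoder = TFBlenderbotSmallModel(config).get_decoder()

input_ids = tf.random.uniform((1, 7), maxval=config.vocab_size, dtype=tf.int32)
outputs = decoder(input_ids, use_cache=True)
past_key_values = outputs.past_key_values

next_tokens = tf.random.uniform((1, 3), maxval=config.vocab_size, dtype=tf.int32)
full_pass = decoder(tf.concat([input_ids, next_tokens], axis=-1))[0]
incremental = decoder(next_tokens, past_key_values=past_key_values)[0]

# The last three positions of the full pass should match the incremental pass.
tf.debugging.assert_near(full_pass[:, -3:, :], incremental, rtol=1e-3)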
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch chameleon model.""" import unittest import pytest import requests from parameterized import parameterized from transformers import ChameleonConfig, is_torch_available, is_vision_available, set_seed from transformers.testing_utils import ( require_bitsandbytes, require_flash_attn, require_read_token, require_torch, require_torch_gpu, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_vision_available(): from PIL import Image if is_torch_available(): import torch from transformers import ( ChameleonForConditionalGeneration, ChameleonModel, ChameleonProcessor, ) class ChameleonModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_labels=True, vocab_size=99, image_token_id=98, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, num_key_value_heads=2, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, vq_num_embeds=12, vq_embed_dim=12, vq_channel_multiplier=[1, 2], vq_img_token_start_id=10, # has to be less than vocab size when added with vq_num_embeds scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.image_token_id = image_token_id self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.scope = scope self.vq_num_embeds = vq_num_embeds self.vq_embed_dim = vq_embed_dim self.vq_channel_multiplier = vq_channel_multiplier self.vq_img_token_start_id = vq_img_token_start_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones(self.batch_size, self.seq_length)).to(torch_device) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], 
self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): # create dummy vocab map for image2bpe mapping if it needs remapping # we assume that vocab size is big enough to accoun for image tokens somewhere in the beginning # same way as in real ckpt, when img tokens are in first half of embeds # we will need "vq_num_embeds" amount of tokens vocab_map = {i: chr(i) for i in range(self.vocab_size)} vocab_map[self.image_token_id] = "<image>" start = self.vq_img_token_start_id end = self.vq_img_token_start_id + self.vq_num_embeds for i in range(start, end): vocab_map[i] = f"IMGIMGBS{i}" # dummy str for each token, anything starting with IMGIMG return ChameleonConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, num_key_value_heads=self.num_key_value_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, vocabulary_map={v: k for k, v in vocab_map.items()}, vq_config=self.get_vq_config(), ) def get_vq_config(self): return { "embed_dim": self.vq_embed_dim, "num_embeddings": self.vq_num_embeds, "latent_channels": self.vq_embed_dim, "in_channels": 3, "base_channels": 32, # we have a GroupNorm of 32 groups, so can't do less "channel_multiplier": self.vq_channel_multiplier, } def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels): model = ChameleonModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = ChameleonForConditionalGeneration(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True model = ChameleonForConditionalGeneration(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) 
output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class ChameleonModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (ChameleonModel, ChameleonForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (ChameleonForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": ChameleonModel, "text-generation": ChameleonForConditionalGeneration, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False fx_compatible = False def setUp(self): self.model_tester = ChameleonModelTester(self) self.config_tester = ConfigTester(self, config_class=ChameleonConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @parameterized.expand([("linear",), ("dynamic",)]) def test_model_rope_scaling(self, scaling_type): config, _ = self.model_tester.prepare_config_and_inputs_for_common() short_input = ids_tensor([1, 10], config.vocab_size) long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights original_model = ChameleonModel(config) original_model.to(torch_device) original_model.eval() original_short_output = original_model(short_input).last_hidden_state original_long_output = original_model(long_input).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights config.rope_scaling = {"type": scaling_type, "factor": 10.0} scaled_model = ChameleonModel(config) scaled_model.to(torch_device) scaled_model.eval() scaled_short_output = scaled_model(short_input).last_hidden_state scaled_long_output = scaled_model(long_input).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) else: self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5)) @require_flash_attn @require_read_token @require_torch_gpu @require_bitsandbytes @pytest.mark.flash_attn_test @slow def test_flash_attn_2_generate_padding_right(self): """ Overwritting the common test as the test is flaky on tiny models """ model = ChameleonForConditionalGeneration.from_pretrained( "facebook/chameleon-7b", load_in_4bit=True, device_map={"": 0}, ) processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b") texts = ["hi", "Hello this is a very long sentence"] processor.tokenizer.padding_side = "right" inputs = processor(texts, return_tensors="pt", padding=True).to(0) output_native = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_native = processor.tokenizer.batch_decode(output_native) model = ChameleonForConditionalGeneration.from_pretrained( "facebook/chameleon-7b", load_in_4bit=True, attn_implementation="flash_attention_2", ) output_fa_2 = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_fa_2 = processor.tokenizer.batch_decode(output_fa_2) self.assertListEqual(output_native, output_fa_2) @unittest.skip("Chameleon forces some token ids to be -inf!") def test_batching_equivalence(self): pass # TODO (joao, raushan): fix me -- the problem is in `cache_position[0] == 0`, i.e. dynamic control flow @unittest.skip("Chameleon is not compatible with end-to-end generation compilation") def test_generate_compile_fullgraph(self): pass @require_torch class ChameleonIntegrationTest(unittest.TestCase): @slow @require_bitsandbytes @require_read_token def test_model_7b(self): model = ChameleonForConditionalGeneration.from_pretrained( "facebook/chameleon-7b", load_in_4bit=True, device_map="auto" ) processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b") image = Image.open( requests.get("https://nineplanets.org/wp-content/uploads/2020/12/the-big-dipper-1.jpg", stream=True).raw ) prompt = "<image>Describe what do you see here and tell me about the history behind it?" inputs = processor(prompt, images=image, return_tensors="pt").to(model.device, torch.float16) # greedy generation outputs EXPECTED_TEXT_COMPLETION = ['Describe what do you see here and tell me about the history behind it?The image depicts a star map, with a bright blue line extending across the center of the image. 
The line is labeled "390 light years" and is accompanied by a small black and'] # fmt: skip generated_ids = model.generate(**inputs, max_new_tokens=40, do_sample=False) text = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) @slow @require_bitsandbytes @require_read_token def test_model_7b_batched(self): model = ChameleonForConditionalGeneration.from_pretrained( "facebook/chameleon-7b", load_in_4bit=True, device_map="auto" ) processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b") image = Image.open( requests.get("https://nineplanets.org/wp-content/uploads/2020/12/the-big-dipper-1.jpg", stream=True).raw ) image_2 = Image.open( requests.get("https://www.kxan.com/wp-content/uploads/sites/40/2020/10/ORION.jpg", stream=True).raw ) prompts = [ "<image>Describe what do you see here and tell me about the history behind it?", "What constellation is this image showing?<image>", ] inputs = processor(prompts, images=[image, image_2], padding=True, return_tensors="pt").to( model.device, torch.float16 ) # greedy generation outputs EXPECTED_TEXT_COMPLETION = [ 'Describe what do you see here and tell me about the history behind it?The image depicts a star map, with a bright blue dot in the center representing the star Alpha Centauri. The star map is a representation of the night sky, showing the positions of stars in', 'What constellation is this image showing?The image is showing the constellation of Orion.' ] # fmt: skip generated_ids = model.generate(**inputs, max_new_tokens=40, do_sample=False) text = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) @slow @require_bitsandbytes @require_read_token def test_model_7b_multi_image(self): model = ChameleonForConditionalGeneration.from_pretrained( "facebook/chameleon-7b", load_in_4bit=True, device_map="auto" ) processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b") image = Image.open( requests.get("https://nineplanets.org/wp-content/uploads/2020/12/the-big-dipper-1.jpg", stream=True).raw ) image_2 = Image.open( requests.get("https://www.kxan.com/wp-content/uploads/sites/40/2020/10/ORION.jpg", stream=True).raw ) prompt = "What do these two images have in common?<image><image>" inputs = processor(prompt, images=[image, image_2], return_tensors="pt").to(model.device, torch.float16) # greedy generation outputs EXPECTED_TEXT_COMPLETION = ['What do these two images have in common?The two images show a connection between two things that are not necessarily related. The first image shows a group of stars, while the second image shows a network of lines connecting two points. The connection between'] # fmt: skip generated_ids = model.generate(**inputs, max_new_tokens=40, do_sample=False) text = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
transformers/tests/models/chameleon/test_modeling_chameleon.py/0
{ "file_path": "transformers/tests/models/chameleon/test_modeling_chameleon.py", "repo_id": "transformers", "token_count": 8114 }
447
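One non-obvious piece of the Chameleon tester above is the dummy vocabulary map built in get_config(): image tokens occupy a contiguous id range starting at vq_img_token_start_id, each must map to a string beginning with "IMGIMG", and the config receives the inverted {string: id} dict. A small self-contained sketch of that construction, using the tester's default values:

# Standalone sketch of the dummy image-token vocabulary map built in get_config()
# above, using the tester defaults (vocab_size=99, image_token_id=98, etc.).
vocab_size, image_token_id = 99, 98
vq_img_token_start_id, vq_num_embeds = 10, 12

vocab_map = {i: chr(i) for i in range(vocab_size)}   # dummy string per token id
vocab_map[image_token_id] = "<image>"                # placeholder image token
for i in range(vq_img_token_start_id, vq_img_token_start_id + vq_num_embeds):
    vocab_map[i] = f"IMGIMGBS{i}"                    # anything starting with IMGIMG

# ChameleonConfig expects the inverted {token_string: token_id} mapping.
vocabulary_map = {token: idx for idx, token in vocab_map.items()}
assert len(vocabulary_map) == vocab_size             # every id keeps a unique string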
# coding=utf-8 # Copyright 2022 The OpenBMB Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch CPMAnt model.""" import unittest from transformers.testing_utils import is_torch_available, require_torch, tooslow from ...generation.test_utils import torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CpmAntConfig, CpmAntForCausalLM, CpmAntModel, CpmAntTokenizer, ) @require_torch class CpmAntModelTester: def __init__( self, parent, batch_size=2, seq_length=8, is_training=True, use_token_type_ids=False, use_input_mask=False, use_labels=False, use_mc_token_ids=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, num_buckets=32, max_distance=128, prompt_length=8, prompt_types=8, segment_types=8, init_std=0.02, return_dict=True, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.num_buckets = num_buckets self.max_distance = max_distance self.prompt_length = prompt_length self.prompt_types = prompt_types self.segment_types = segment_types self.init_std = init_std self.return_dict = return_dict def prepare_config_and_inputs(self): input_ids = {} input_ids["input_ids"] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).type(torch.int32) input_ids["use_cache"] = False config = self.get_config() return (config, input_ids) def get_config(self): return CpmAntConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, dim_ff=self.intermediate_size, position_bias_num_buckets=self.num_buckets, position_bias_max_distance=self.max_distance, prompt_types=self.prompt_types, prompt_length=self.prompt_length, segment_types=self.segment_types, use_cache=True, init_std=self.init_std, return_dict=self.return_dict, ) def create_and_check_cpmant_model(self, config, input_ids, *args): model = CpmAntModel(config=config) model.to(torch_device) model.eval() hidden_states = model(**input_ids).last_hidden_state self.parent.assertEqual(hidden_states.shape, (self.batch_size, self.seq_length, config.hidden_size)) def create_and_check_lm_head_model(self, config, input_ids, *args): model = CpmAntForCausalLM(config) model.to(torch_device) input_ids["input_ids"] = input_ids["input_ids"].to(torch_device) model.eval() model_output = model(**input_ids) self.parent.assertEqual( model_output.logits.shape, 
(self.batch_size, self.seq_length, config.vocab_size + config.prompt_types * config.prompt_length), ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict @require_torch class CpmAntModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (CpmAntModel, CpmAntForCausalLM) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": CpmAntModel, "text-generation": CpmAntForCausalLM} if is_torch_available() else {} ) test_pruning = False test_missing_keys = False test_mismatched_shapes = False test_head_masking = False test_resize_embeddings = False def setUp(self): self.model_tester = CpmAntModelTester(self) self.config_tester = ConfigTester(self, config_class=CpmAntConfig) def test_config(self): self.config_tester.run_common_tests() def test_inputs_embeds(self): unittest.skip(reason="CPMAnt doesn't support input_embeds.")(self.test_inputs_embeds) def test_retain_grad_hidden_states_attentions(self): unittest.skip( "CPMAnt doesn't support retain grad in hidden_states or attentions, because prompt management will peel off the output.hidden_states from graph.\ So is attentions. We strongly recommand you use loss to tune model." )(self.test_retain_grad_hidden_states_attentions) def test_cpmant_model(self): config, inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_cpmant_model(config, inputs) def test_cpmant_lm_head_model(self): config, inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(config, inputs) @require_torch class CpmAntModelIntegrationTest(unittest.TestCase): @tooslow def test_inference_masked_lm(self): texts = "今天天气真好!" model_path = "openbmb/cpm-ant-10b" model = CpmAntModel.from_pretrained(model_path) tokenizer = CpmAntTokenizer.from_pretrained(model_path) inputs = tokenizer(texts, return_tensors="pt") hidden_states = model(**inputs).last_hidden_state expected_slice = torch.tensor( [[[6.1708, 5.9244, 1.0835], [6.5207, 6.2893, -11.3324], [-1.0107, -0.0576, -5.9577]]], ) self.assertTrue(torch.allclose(hidden_states[:, :3, :3], expected_slice, atol=1e-2)) @require_torch class CpmAntForCausalLMlIntegrationTest(unittest.TestCase): @tooslow def test_inference_casual(self): texts = "今天天气真好!" 
model_path = "openbmb/cpm-ant-10b" model = CpmAntForCausalLM.from_pretrained(model_path) tokenizer = CpmAntTokenizer.from_pretrained(model_path) inputs = tokenizer(texts, return_tensors="pt") hidden_states = model(**inputs).logits expected_slice = torch.tensor( [[[-6.4267, -6.4083, -6.3958], [-5.8802, -5.9447, -5.7811], [-5.3896, -5.4820, -5.4295]]], ) self.assertTrue(torch.allclose(hidden_states[:, :3, :3], expected_slice, atol=1e-2)) @tooslow def test_simple_generation(self): model_path = "openbmb/cpm-ant-10b" model = CpmAntForCausalLM.from_pretrained(model_path) tokenizer = CpmAntTokenizer.from_pretrained(model_path) texts = "今天天气不错," expected_output = "今天天气不错,阳光明媚,我和妈妈一起去超市买东西。\n在超市里,我看到了一个很好玩的玩具,它的名字叫“机器人”。它有一个圆圆的脑袋,两只圆圆的眼睛,还有一个圆圆的" model_inputs = tokenizer(texts, return_tensors="pt") token_ids = model.generate(**model_inputs) output_texts = tokenizer.batch_decode(token_ids) self.assertEqual(expected_output, output_texts) @tooslow def test_batch_generation(self): model_path = "openbmb/cpm-ant-10b" model = CpmAntForCausalLM.from_pretrained(model_path) tokenizer = CpmAntTokenizer.from_pretrained(model_path) texts = ["今天天气不错,", "新年快乐,万事如意!"] expected_output = [ "今天天气不错,阳光明媚,我和妈妈一起去超市买东西。\n在超市里,我看到了一个很好玩的玩具,它的名字叫“机器人”。它有一个圆圆的脑袋,两只圆圆的眼睛,还有一个圆圆的", "新年快乐,万事如意!在这辞旧迎新的美好时刻,我谨代表《农村新技术》杂志社全体同仁,向一直以来关心、支持《农村新技术》杂志发展的各级领导、各界朋友和广大读者致以最诚挚的", ] model_inputs = tokenizer(texts, return_tensors="pt", padding=True) token_ids = model.generate(**model_inputs) output_texts = tokenizer.batch_decode(token_ids) self.assertEqual(expected_output, output_texts)
transformers/tests/models/cpmant/test_modeling_cpmant.py/0
{ "file_path": "transformers/tests/models/cpmant/test_modeling_cpmant.py", "repo_id": "transformers", "token_count": 4314 }
448
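Note the expected logits shape asserted in create_and_check_lm_head_model above: the last dimension is vocab_size + prompt_types * prompt_length, i.e. the text vocabulary plus the learned prompt tokens. With the tester defaults the arithmetic works out as follows:

# Expected logits shape for the CpmAnt LM-head check above, with tester defaults.
batch_size, seq_length = 2, 8
vocab_size, prompt_types, prompt_length = 99, 8, 8

logits_dim = vocab_size + prompt_types * prompt_length   # 99 + 8 * 8 = 163
expected_shape = (batch_size, seq_length, logits_dim)
print(expected_shape)  # (2, 8, 163)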
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the TensorFlow Data2VecVision model.""" from __future__ import annotations import collections.abc import inspect import unittest import numpy as np from transformers import Data2VecVisionConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFData2VecVisionForImageClassification, TFData2VecVisionForSemanticSegmentation, TFData2VecVisionModel, ) from transformers.modeling_tf_utils import keras if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class TFData2VecVisionModelTester: def __init__( self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3], ): self.parent = parent self.vocab_size = 100 self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.out_indices = out_indices self.num_labels = num_labels def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels, pixel_labels def get_config(self): return Data2VecVisionConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices, ) def create_and_check_model(self, config, pixel_values, labels, pixel_labels): model = TFData2VecVisionModel(config=config) result = model(pixel_values, training=False) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = ( self.image_size if isinstance(self.image_size, collections.abc.Iterable) else (self.image_size, self.image_size) ) patch_size = ( self.patch_size if isinstance(self.image_size, collections.abc.Iterable) else (self.patch_size, self.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.type_sequence_label_size model = TFData2VecVisionForImageClassification(config) result = model(pixel_values, labels=labels, training=False) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_for_image_segmentation(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = TFData2VecVisionForSemanticSegmentation(config) result = model(pixel_values, training=False) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) result = model(pixel_values, labels=pixel_labels) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels, pixel_labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict def prepare_config_and_inputs_for_keras_fit(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, _, _ = config_and_inputs inputs_dict = {"pixel_values": pixel_values, "labels": tf.zeros((self.batch_size))} return config, inputs_dict @require_tf class TFData2VecVisionModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Data2VecVision does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( (TFData2VecVisionModel, TFData2VecVisionForImageClassification, TFData2VecVisionForSemanticSegmentation) if is_tf_available() else () ) pipeline_model_mapping = ( {"feature-extraction": TFData2VecVisionModel, "image-classification": TFData2VecVisionForImageClassification} if is_tf_available() else {} ) test_pruning = False test_onnx = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = TFData2VecVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=Data2VecVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Data2VecVision does not use inputs_embeds") def test_inputs_embeds(self): # Data2VecVision does not use inputs_embeds pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (keras.layers.Layer)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, keras.layers.Layer)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True # in Data2VecVision, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) patch_size = ( self.model_tester.patch_size if isinstance(self.model_tester.patch_size, collections.abc.Iterable) else (self.model_tester.patch_size, self.model_tester.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] 
config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # Data2VecVision has a different seq_length image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) patch_size = ( self.model_tester.patch_size if isinstance(self.model_tester.patch_size, collections.abc.Iterable) else (self.model_tester.patch_size, self.model_tester.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_length = num_patches + 1 self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # Overriding this method since the base method won't be compatible with Data2VecVision. @slow def test_keras_fit(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Since `TFData2VecVisionModel` cannot operate with the default `fit()` method. 
if model_class.__name__ != "TFData2VecVisionModel": model = model_class(config) if getattr(model, "hf_compute_loss", None): # Test that model correctly compute the loss with kwargs _, prepared_for_class = self.model_tester.prepare_config_and_inputs_for_keras_fit() label_names = {"labels"} self.assertGreater(len(label_names), 0, msg="No matching label names found!") labels = {key: val for key, val in prepared_for_class.items() if key in label_names} inputs_minus_labels = { key: val for key, val in prepared_for_class.items() if key not in label_names } self.assertGreater(len(inputs_minus_labels), 0) model.compile(optimizer=keras.optimizers.SGD(0.0), run_eagerly=True) # Make sure the model fits without crashing regardless of where we pass the labels history1 = model.fit( prepared_for_class, validation_data=prepared_for_class, steps_per_epoch=1, validation_steps=1, shuffle=False, ) val_loss1 = history1.history["val_loss"][0] history2 = model.fit( inputs_minus_labels, labels, validation_data=(inputs_minus_labels, labels), steps_per_epoch=1, validation_steps=1, shuffle=False, ) val_loss2 = history2.history["val_loss"][0] self.assertTrue(np.allclose(val_loss1, val_loss2, atol=1e-2, rtol=1e-3)) def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=2e-4, name="outputs", attributes=None): # We override with a slightly higher tol value, as semseg models tend to diverge a bit more super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes) # Overriding this method since the base method won't be compatible with Data2VecVision. def test_loss_computation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Since `TFData2VecVisionModel` won't have labels against which we # could compute loss. 
if model_class.__name__ != "TFData2VecVisionModel": model = model_class(config) if getattr(model, "hf_compute_loss", None): # The number of elements in the loss should be the same as the number of elements in the label _, prepared_for_class = self.model_tester.prepare_config_and_inputs_for_keras_fit() added_label = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0] ] loss_size = tf.size(added_label) # Test that model correctly compute the loss with kwargs possible_input_names = {"input_ids", "pixel_values", "input_features"} input_name = possible_input_names.intersection(set(prepared_for_class)).pop() model_input = prepared_for_class.pop(input_name) loss = model(model_input, **prepared_for_class)[0] self.assertEqual(loss.shape, [loss_size]) # Test that model correctly compute the loss with a dict _, prepared_for_class = self.model_tester.prepare_config_and_inputs_for_keras_fit() loss = model(**prepared_for_class)[0] self.assertEqual(loss.shape, [loss_size]) # Test that model correctly compute the loss with a tuple label_keys = prepared_for_class.keys() - inputs_dict.keys() signature = inspect.signature(model.call).parameters signature_names = list(signature.keys()) # Create a dictionary holding the location of the tensors in the tuple tuple_index_mapping = {0: input_name} for label_key in label_keys: label_key_index = signature_names.index(label_key) tuple_index_mapping[label_key_index] = label_key sorted_tuple_index_mapping = sorted(tuple_index_mapping.items()) # Initialize a list with their default values, update the values and convert to a tuple list_input = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default) for index, value in sorted_tuple_index_mapping: list_input[index] = prepared_for_class[value] tuple_input = tuple(list_input) # Send to model loss = model(tuple_input[:-1])[0] self.assertEqual(loss.shape, [loss_size]) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "facebook/data2vec-vision-base-ft1k" model = TFData2VecVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class TFData2VecVisionModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( BeitImageProcessor.from_pretrained("facebook/data2vec-vision-base-ft1k") if is_vision_available() else None ) @slow def test_inference_image_classification_head_imagenet_1k(self): model = TFData2VecVisionForImageClassification.from_pretrained("facebook/data2vec-vision-base-ft1k") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") # forward pass outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = tf.convert_to_tensor([1, 1000]) self.assertEqual(logits.shape, expected_shape) expected_slice = tf.convert_to_tensor([0.3277, -0.1395, 0.0911]) tf.debugging.assert_near(logits[0, :3], expected_slice, atol=1e-4) expected_top2 = [model.config.label2id[i] for i in ["remote control, remote", "tabby, tabby cat"]] self.assertEqual(tf.nn.top_k(outputs.logits[0], 2).indices.numpy().tolist(), expected_top2)
transformers/tests/models/data2vec/test_modeling_tf_data2vec_vision.py/0
{ "file_path": "transformers/tests/models/data2vec/test_modeling_tf_data2vec_vision.py", "repo_id": "transformers", "token_count": 9900 }
449
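Several of the overridden tests above recompute the ViT-style sequence length: the image is cut into non-overlapping patches and one position is added for the [CLS] token. With the tester defaults (image_size=30, patch_size=2) the numbers are:

# Sequence-length arithmetic used by the Data2VecVision tests above (tester defaults).
image_size, patch_size = 30, 2

num_patches = (image_size // patch_size) * (image_size // patch_size)  # 15 * 15 = 225
seq_length = num_patches + 1   # + 1 for the [CLS] token -> 226
print(num_patches, seq_length)  # 225 226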
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class FlaxDistilBertModelTester(unittest.TestCase): def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_choices = num_choices def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = DistilBertConfig( vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, tie_weights_=True, ) return config, input_ids, attention_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, 
FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def setUp(self): self.model_tester = FlaxDistilBertModelTester(self) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("distilbert-base-uncased") outputs = model(np.ones((1, 1))) self.assertIsNotNone(outputs) @require_flax class FlaxDistilBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased") input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = (1, 11, 768) self.assertEqual(output.shape, expected_shape) expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
transformers/tests/models/distilbert/test_modeling_flax_distilbert.py/0
{ "file_path": "transformers/tests/models/distilbert/test_modeling_flax_distilbert.py", "repo_id": "transformers", "token_count": 2492 }
450
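The Flax integration test above reduces to a plain from_pretrained load followed by one forward pass; a hedged, minimal version using the same checkpoint and input ids as the test is:

# Minimal sketch mirroring FlaxDistilBertModelIntegrationTest above.
import numpy as np
from transformers import FlaxDistilBertModel

model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")

input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])

last_hidden_state = model(input_ids, attention_mask=attention_mask)[0]
print(last_hidden_state.shape)  # (1, 11, 768)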
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch DPT model.""" import unittest from transformers import Dinov2Config, DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DPTForDepthEstimation from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class DPTModelTester: def __init__( self, parent, batch_size=2, num_channels=3, image_size=32, patch_size=16, use_labels=True, num_labels=3, is_training=True, hidden_size=4, num_hidden_layers=2, num_attention_heads=2, intermediate_size=8, out_features=["stage1", "stage2"], apply_layernorm=False, reshape_hidden_states=False, neck_hidden_sizes=[2, 2], fusion_hidden_size=6, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.out_features = out_features self.apply_layernorm = apply_layernorm self.reshape_hidden_states = reshape_hidden_states self.use_labels = use_labels self.num_labels = num_labels self.is_training = is_training self.neck_hidden_sizes = neck_hidden_sizes self.fusion_hidden_size = fusion_hidden_size # DPT's sequence length self.seq_length = (self.image_size // self.patch_size) ** 2 + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return DPTConfig( backbone_config=self.get_backbone_config(), backbone=None, neck_hidden_sizes=self.neck_hidden_sizes, fusion_hidden_size=self.fusion_hidden_size, ) def get_backbone_config(self): return Dinov2Config( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, is_training=self.is_training, out_features=self.out_features, reshape_hidden_states=self.reshape_hidden_states, ) def create_and_check_for_depth_estimation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = DPTForDepthEstimation(config) model.to(torch_device) model.eval() result = 
model(pixel_values) self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as DPT does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (DPTForDepthEstimation,) if is_torch_available() else () pipeline_model_mapping = {"depth-estimation": DPTForDepthEstimation} if is_torch_available() else {} test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = DPTModelTester(self) self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="DPT with AutoBackbone does not have a base model and hence no input_embeddings") def test_inputs_embeds(self): pass def test_for_depth_estimation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs) def test_training(self): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True if model_class.__name__ in MODEL_MAPPING_NAMES.values(): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing: continue model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) # Skip the check for the backbone backbone_params = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="DPT with AutoBackbone does not have a base model and hence no input_embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="DPT with AutoBackbone does not have a base model") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="DPT with 
AutoBackbone does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model_name = "Intel/dpt-large"
        model = DPTForDepthEstimation.from_pretrained(model_name)
        self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation_dinov2(self):
        image_processor = DPTImageProcessor.from_pretrained("facebook/dpt-dinov2-small-kitti")
        model = DPTForDepthEstimation.from_pretrained("facebook/dpt-dinov2-small-kitti").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 576, 736))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[6.0433, 7.1636, 7.4268], [6.9047, 7.2471, 7.2355], [7.9261, 8.0631, 8.0244]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_depth_estimation_beit(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-beit-base-384")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-beit-base-384").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[2669.7061, 2663.7144, 2674.9399], [2633.9326, 2650.9092, 2665.4270], [2621.8271, 2632.0129, 2637.2290]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_depth_estimation_swinv2(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-swinv2-tiny-256")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-swinv2-tiny-256").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 256, 256))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[1032.7719, 1025.1886, 1030.2661], [1023.7619, 1021.0075, 1024.9121], [1022.5667, 1018.8522, 1021.4145]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4))
transformers/tests/models/dpt/test_modeling_dpt_auto_backbone.py/0
{ "file_path": "transformers/tests/models/dpt/test_modeling_dpt_auto_backbone.py", "repo_id": "transformers", "token_count": 5379 }
451
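For orientation, the DPT tests above exercise `DPTForDepthEstimation` on top of a randomly initialized Dinov2 backbone. The following is a hedged standalone sketch (not part of the test file): it reuses the tiny, illustrative sizes from `DPTModelTester`, carries no pretrained weights, and is only meant to show the expected output shape.

# Standalone sketch, assuming a tiny random-weight config mirroring DPTModelTester.
import torch
from transformers import Dinov2Config, DPTConfig, DPTForDepthEstimation

backbone_config = Dinov2Config(
    image_size=32,
    patch_size=16,
    num_channels=3,
    hidden_size=4,
    num_hidden_layers=2,
    num_attention_heads=2,
    intermediate_size=8,
    out_features=["stage1", "stage2"],
    reshape_hidden_states=False,
)
config = DPTConfig(backbone_config=backbone_config, backbone=None, neck_hidden_sizes=[2, 2], fusion_hidden_size=6)
model = DPTForDepthEstimation(config).eval()

pixel_values = torch.rand(1, 3, 32, 32)
with torch.no_grad():
    # The tester asserts predicted_depth has shape (batch_size, image_size, image_size).
    predicted_depth = model(pixel_values).predicted_depth  # expected shape: (1, 32, 32)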
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest from transformers import FlaubertConfig, is_sacremoses_available, is_torch_available from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import create_sinusoidal_embeddings class FlaubertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, summary_type="last", use_proj=None, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_lengths = use_input_lengths self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.gelu_activation = gelu_activation self.sinusoidal_embeddings = sinusoidal_embeddings self.causal = causal self.asm = asm self.n_langs = n_langs self.vocab_size = vocab_size self.n_special = n_special self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.summary_type = summary_type self.use_proj = use_proj self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = random_attention_mask([self.batch_size, self.seq_length]) input_lengths = None if self.use_input_lengths: input_lengths = ( ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2 ) # small variation of seq_length token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs) sequence_labels = None token_labels = None is_impossible_labels = None if self.use_labels: 
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) is_impossible_labels = ids_tensor([self.batch_size], 2).float() choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def get_config(self): return FlaubertConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, ) def create_and_check_flaubert_model( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, lengths=input_lengths, langs=token_type_ids) result = model(input_ids, langs=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_flaubert_lm_head( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertWithLMHeadModel(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_flaubert_simple_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertForQuestionAnsweringSimple(config) model.to(torch_device) model.eval() result = model(input_ids) result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_flaubert_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertForQuestionAnswering(config) model.to(torch_device) model.eval() result = model(input_ids) result_with_labels = model( input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, ) result_with_labels = model( input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, ) (total_loss,) = result_with_labels.to_tuple() result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels) (total_loss,) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape, ()) self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, 
model.config.start_n_top)) self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top)) self.parent.assertEqual( result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,)) def create_and_check_flaubert_sequence_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids) result = model(input_ids, labels=sequence_labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_flaubert_token_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_labels = self.num_labels model = FlaubertForTokenClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_flaubert_multiple_choice( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_choices = self.num_choices model = FlaubertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() and is_sacremoses_available() else {} ) # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, 
tokenizer_name, processor_name ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False # Flaubert has 2 QA models -> need to manually set the correct labels for one of them here def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = FlaubertModelTester(self) self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37) def test_config(self): self.config_tester.run_common_tests() def test_flaubert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*config_and_inputs) # Copied from tests/models/distilbert/test_modeling_distilbert.py with Distilbert->Flaubert def test_flaubert_model_with_sinusoidal_encodings(self): config = FlaubertConfig(sinusoidal_embeddings=True) model = FlaubertModel(config=config) sinusoidal_pos_embds = torch.empty((config.max_position_embeddings, config.emb_dim), dtype=torch.float32) create_sinusoidal_embeddings(config.max_position_embeddings, config.emb_dim, sinusoidal_pos_embds) self.model_tester.parent.assertTrue(torch.equal(model.position_embeddings.weight, sinusoidal_pos_embds)) def test_flaubert_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs) def test_flaubert_simple_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs) def test_flaubert_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*config_and_inputs) def test_flaubert_sequence_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs) def test_flaubert_token_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs) def test_flaubert_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "flaubert/flaubert_small_cased" model = FlaubertModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow @require_torch_accelerator def test_torchscript_device_change(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: self.skipTest(reason="FlauBertForMultipleChoice behaves incorrectly in JIT environments.") config.torchscript = True model = model_class(config=config) inputs_dict = self._prepare_for_class(inputs_dict, model_class) traced_model = torch.jit.trace( model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt")) loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device) loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device)) @require_torch class FlaubertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
transformers/tests/models/flaubert/test_modeling_flaubert.py/0
{ "file_path": "transformers/tests/models/flaubert/test_modeling_flaubert.py", "repo_id": "transformers", "token_count": 8841 }
452
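As a quick reference for the shapes asserted above, here is a hedged sketch of a single forward pass through a tiny, randomly initialized `FlaubertModel`; the configuration values mirror `FlaubertModelTester` and are illustrative only, not part of the test file.

# Standalone sketch, assuming a tiny random-weight config mirroring FlaubertModelTester.
import torch
from transformers import FlaubertConfig, FlaubertModel

config = FlaubertConfig(
    vocab_size=99,
    emb_dim=32,
    n_layers=2,
    n_heads=4,
    n_langs=2,
    max_position_embeddings=512,
)
model = FlaubertModel(config).eval()

input_ids = torch.randint(0, config.vocab_size, (2, 7))
langs = torch.zeros_like(input_ids)  # language ids, analogous to the `langs=token_type_ids` call in the tester
with torch.no_grad():
    last_hidden_state = model(input_ids, langs=langs).last_hidden_state  # (2, 7, 32)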
# coding=utf-8 # Copyright 2020 HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import FunnelConfig, FunnelTokenizer, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, ) class FunnelModelTester: """You can also import this e.g, from .test_modeling_funnel import FunnelModelTester""" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, block_sizes=[1, 1, 2], num_decoder_layers=1, d_model=32, n_head=4, d_head=8, d_inner=37, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, max_position_embeddings=512, type_vocab_size=3, initializer_std=0.02, # Set to a smaller value, so we can keep the small error threshold (1e-5) in the test num_labels=3, num_choices=4, scope=None, base=False, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.block_sizes = block_sizes self.num_decoder_layers = num_decoder_layers self.d_model = d_model self.n_head = n_head self.d_head = d_head self.d_inner = d_inner self.hidden_act = hidden_act self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = 2 self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.initializer_std = initializer_std # Used in the tests to check the size of the first attention layer self.num_attention_heads = n_head # Used in the tests to check the size of the first hidden state self.hidden_size = self.d_model # Used in the tests to check the number of output hidden states/attentions self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: self.expected_num_hidden_layers = self.num_hidden_layers + 2 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) fake_token_labels = ids_tensor([self.batch_size, self.seq_length], 1) config = self.get_config() return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ) def get_config(self): return FunnelConfig( vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_std=self.initializer_std, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = FunnelModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model)) model.config.truncate_seq = False result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model)) model.config.separate_cls = False result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model)) def create_and_check_base_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = FunnelBaseModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model)) model.config.truncate_seq = False result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model)) model.config.separate_cls = False result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_labels = self.num_labels model = FunnelForPreTraining(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=fake_token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, 
self.seq_length)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = FunnelForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_labels = self.num_labels model = FunnelForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_choices = self.num_choices model = FunnelForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_labels = self.num_labels model = FunnelForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = FunnelForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class FunnelModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_head_masking = False test_pruning = False all_model_classes = ( ( FunnelModel, FunnelForMaskedLM, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForTokenClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { 
"feature-extraction": (FunnelBaseModel, FunnelModel), "fill-mask": FunnelForMaskedLM, "question-answering": FunnelForQuestionAnswering, "text-classification": FunnelForSequenceClassification, "token-classification": FunnelForTokenClassification, "zero-shot": FunnelForSequenceClassification, } if is_torch_available() else {} ) # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = FunnelModelTester(self) self.config_tester = ConfigTester(self, config_class=FunnelConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) for param in ["r_w_bias", "r_r_bias", "r_kernel", "r_s_bias", "seg_embed"]: if hasattr(module, param) and getattr(module, param) is not None: weight = getattr(module, param) weight.data.fill_(3) @require_torch class FunnelBaseModelTest(ModelTesterMixin, unittest.TestCase): test_head_masking = False test_pruning = False all_model_classes = ( (FunnelBaseModel, FunnelForMultipleChoice, FunnelForSequenceClassification) if is_torch_available() else () ) def setUp(self): self.model_tester = FunnelModelTester(self, base=True) self.config_tester = ConfigTester(self, config_class=FunnelConfig) def test_config(self): self.config_tester.run_common_tests() def test_base_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) # overwrite from test_modeling_common def test_training(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if model_class.__name__ == "FunnelBaseModel": continue model = model_class(config) 
model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) for param in ["r_w_bias", "r_r_bias", "r_kernel", "r_s_bias", "seg_embed"]: if hasattr(module, param) and getattr(module, param) is not None: weight = getattr(module, param) weight.data.fill_(3) @require_torch @require_sentencepiece @require_tokenizers class FunnelModelIntegrationTest(unittest.TestCase): def test_inference_tiny_model(self): batch_size = 13 sequence_length = 7 input_ids = torch.arange(0, batch_size * sequence_length).long().reshape(batch_size, sequence_length) lengths = [0, 1, 2, 3, 4, 5, 6, 4, 1, 3, 5, 0, 1] token_type_ids = torch.tensor([[2] + [0] * a + [1] * (sequence_length - a - 1) for a in lengths]) model = FunnelModel.from_pretrained("sgugger/funnel-random-tiny") output = model(input_ids, token_type_ids=token_type_ids)[0].abs() expected_output_sum = torch.tensor(2344.8352) expected_output_mean = torch.tensor(0.8052) self.assertTrue(torch.allclose(output.sum(), expected_output_sum, atol=1e-4)) self.assertTrue(torch.allclose(output.mean(), expected_output_mean, atol=1e-4)) attention_mask = torch.tensor([[1] * 7, [1] * 4 + [0] * 3] * 6 + [[0, 1, 1, 0, 0, 1, 1]]) output = model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)[0].abs() expected_output_sum = torch.tensor(2343.8425) expected_output_mean = torch.tensor(0.8049) self.assertTrue(torch.allclose(output.sum(), expected_output_sum, atol=1e-4)) self.assertTrue(torch.allclose(output.mean(), expected_output_mean, atol=1e-4)) @slow def test_inference_model(self): tokenizer = FunnelTokenizer.from_pretrained("huggingface/funnel-small") model = FunnelModel.from_pretrained("huggingface/funnel-small") inputs = tokenizer("Hello! I am the Funnel Transformer model.", return_tensors="pt") output = model(**inputs)[0] expected_output_sum = torch.tensor(235.7246) expected_output_mean = torch.tensor(0.0256) self.assertTrue(torch.allclose(output.sum(), expected_output_sum, atol=1e-4)) self.assertTrue(torch.allclose(output.mean(), expected_output_mean, atol=1e-4))
transformers/tests/models/funnel/test_modeling_funnel.py/0
{ "file_path": "transformers/tests/models/funnel/test_modeling_funnel.py", "repo_id": "transformers", "token_count": 9059 }
453
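The Funnel tests above repeatedly check that `last_hidden_state` keeps the input sequence length, because the decoder upsamples back to the input resolution. Below is a hedged standalone sketch with a tiny random-weight configuration mirroring `FunnelModelTester`; it is illustrative only and not part of the test file.

# Standalone sketch, assuming a tiny random-weight config mirroring FunnelModelTester.
import torch
from transformers import FunnelConfig, FunnelModel

config = FunnelConfig(
    vocab_size=99,
    block_sizes=[1, 1, 2],
    num_decoder_layers=1,
    d_model=32,
    n_head=4,
    d_head=8,
    d_inner=37,
)
model = FunnelModel(config).eval()

input_ids = torch.randint(0, config.vocab_size, (2, 8))
with torch.no_grad():
    # The decoder upsamples back to the input length, so the sequence dimension is preserved.
    last_hidden_state = model(input_ids).last_hidden_state  # (2, 8, 32)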
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch GroupViT model.""" import inspect import os import random import tempfile import unittest import numpy as np import requests from transformers import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig from transformers.testing_utils import is_pt_tf_cross_test, require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import GroupViTModel, GroupViTTextModel, GroupViTVisionModel if is_vision_available(): from PIL import Image from transformers import CLIPProcessor class GroupViTVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, depths=[6, 3, 3], num_group_tokens=[64, 8, 0], num_output_groups=[64, 8, 8], num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.depths = depths self.num_hidden_layers = sum(depths) self.expected_num_hidden_layers = len(depths) + 1 self.num_group_tokens = num_group_tokens self.num_output_groups = num_output_groups self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope num_patches = (image_size // patch_size) ** 2 # no [CLS] token for GroupViT self.seq_length = num_patches def prepare_config_and_inputs(self): rng = random.Random(0) pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size], rng=rng) config = self.get_config() return config, pixel_values def get_config(self): return GroupViTVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, depths=self.depths, num_group_tokens=self.num_group_tokens, num_output_groups=self.num_output_groups, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = GroupViTVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, 
self.num_output_groups[-1], self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class GroupViTVisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as GROUPVIT does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (GroupViTVisionModel,) if is_torch_available() else () test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = GroupViTVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=GroupViTVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="GroupViT does not use inputs_embeds") def test_inputs_embeds(self): pass @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self): import tensorflow as tf seed = 338 random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) tf.random.set_seed(seed) return super().test_pt_tf_model_equivalence() def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) expected_num_attention_outputs = sum(g > 0 for g in self.model_tester.num_group_tokens) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions # GroupViT returns attention grouping of each stage self.assertEqual(len(attentions), sum(g > 0 for g in self.model_tester.num_group_tokens)) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions # GroupViT returns attention grouping of each stage self.assertEqual(len(attentions), expected_num_attention_outputs) out_len = len(outputs) # Check attention is always last and 
order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions # GroupViT returns attention grouping of each stage self.assertEqual(len(self_attentions), expected_num_attention_outputs) for i, self_attn in enumerate(self_attentions): if self_attn is None: continue self.assertListEqual( list(self_attentions[i].shape[-2:]), [ self.model_tester.num_output_groups[i], self.model_tester.num_output_groups[i - 1] if i > 0 else seq_len, ], ) @unittest.skip def test_training(self): pass @unittest.skip def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="GroupViTVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="GroupViTVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass # override since the attention mask from GroupViT is not used to compute loss, thus no grad def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] if config.is_encoder_decoder: # Seq2Seq models encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() decoder_hidden_states = outputs.decoder_hidden_states[0] decoder_hidden_states.retain_grad() if self.has_attentions: encoder_attentions = outputs.encoder_attentions[0] encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(decoder_hidden_states.grad) if self.has_attentions: self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) else: # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: attentions = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) if self.has_attentions: self.assertIsNone(attentions.grad) @slow def test_model_from_pretrained(self): model_name = "nvidia/groupvit-gcc-yfcc" model = GroupViTVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class GroupViTTextModelTester: def 
__init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        rng = random.Random(0)
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size, rng=rng)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, input_mask

    def get_config(self):
        return GroupViTTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GroupViTTextModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(input_ids, attention_mask=input_mask)
            result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class GroupViTTextModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (GroupViTTextModel,) if is_torch_available() else ()
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GroupViTTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GroupViTTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip
    def test_training(self):
        pass

    @unittest.skip
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture does not seem to compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture does not seem to compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="GroupViTTextModel does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="GroupViTTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="GroupViTTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model_name = "nvidia/groupvit-gcc-yfcc"
        model = GroupViTTextModel.from_pretrained(model_name)
        self.assertIsNotNone(model)


class GroupViTModelTester:
    def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
        if text_kwargs is None:
            text_kwargs = {}
        if vision_kwargs is None:
            vision_kwargs = {}

        self.parent = parent
        self.text_model_tester = GroupViTTextModelTester(parent, **text_kwargs)
        self.vision_model_tester = GroupViTVisionModelTester(parent, **vision_kwargs)
        self.batch_size = self.text_model_tester.batch_size  # need bs for batching_equivalence test
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
        config = self.get_config()
        return config, input_ids, attention_mask, pixel_values

    def get_config(self):
        return GroupViTConfig.from_text_vision_configs(
            self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64
        )

    def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
        model = GroupViTModel(config).to(torch_device).eval()
        with torch.no_grad():
            result = model(input_ids, pixel_values, attention_mask)
        self.parent.assertEqual(
            result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
        )
        self.parent.assertEqual(
            result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, pixel_values = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
            "return_loss": True,
        }
        return config, inputs_dict


@require_torch
class GroupViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GroupViTModel,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": GroupViTModel} if is_torch_available() else {}
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = GroupViTModelTester(self)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="hidden_states are tested in individual model tests")
    def test_hidden_states_output(self):
        pass

    @unittest.skip(reason="input_embeds are tested in individual model tests")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="tested in individual model tests")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="GroupViTModel does not have input/output embeddings")
    def test_model_get_set_embeddings(self):
        pass

    # overwritten from parent as this equivalence test needs a specific `seed`, and it is hard to get a good one!
    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=2e-5, name="outputs", attributes=None):
        super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol=tol, name=name, attributes=attributes)

    @is_pt_tf_cross_test
    def test_pt_tf_model_equivalence(self):
        import tensorflow as tf

        seed = 163
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        tf.random.set_seed(seed)
        return super().test_pt_tf_model_equivalence()

    # override as the `logit_scale` parameter initialization is different for GROUPVIT
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    # check if `logit_scale` is initialized as per the original implementation
                    if name == "logit_scale":
                        self.assertAlmostEqual(
                            param.data.item(),
                            np.log(1 / 0.07),
                            delta=1e-3,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    def _create_and_check_torchscript(self, config, inputs_dict):
        if not self.test_torchscript:
            self.skipTest(reason="test_torchscript is set to False")

        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        configs_no_init.torchscript = True
        configs_no_init.return_dict = False
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()

            try:
                input_ids = inputs_dict["input_ids"]
                pixel_values = inputs_dict["pixel_values"]  # GROUPVIT needs pixel_values
                traced_model = torch.jit.trace(model, (input_ids, pixel_values))
            except RuntimeError:
                self.fail("Couldn't trace module.")

            with tempfile.TemporaryDirectory() as tmp_dir_name:
                pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")

                try:
                    torch.jit.save(traced_model, pt_file_name)
                except Exception:
                    self.fail("Couldn't save module.")

                try:
                    loaded_model = torch.jit.load(pt_file_name)
                except Exception:
                    self.fail("Couldn't load module.")

            model.to(torch_device)
            model.eval()

            loaded_model.to(torch_device)
            loaded_model.eval()

            model_state_dict = model.state_dict()
            loaded_model_state_dict = loaded_model.state_dict()

            non_persistent_buffers = {}
            for key in loaded_model_state_dict.keys():
                if key not in model_state_dict.keys():
                    non_persistent_buffers[key] = loaded_model_state_dict[key]

            loaded_model_state_dict = {
                key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
            }

            self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))

            model_buffers = list(model.buffers())
            for non_persistent_buffer in non_persistent_buffers.values():
                found_buffer = False
                for i, model_buffer in enumerate(model_buffers):
                    if torch.equal(non_persistent_buffer, model_buffer):
                        found_buffer = True
                        break

                self.assertTrue(found_buffer)
                model_buffers.pop(i)

            models_equal = True
            for layer_name, p1 in model_state_dict.items():
                p2 = loaded_model_state_dict[layer_name]
                if p1.data.ne(p2.data).sum() > 0:
                    models_equal = False

            self.assertTrue(models_equal)

    def test_load_vision_text_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # Save GroupViTConfig and check if we can load GroupViTVisionConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            vision_config = GroupViTVisionConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())

        # Save GroupViTConfig and check if we can load GroupViTTextConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            text_config = GroupViTTextConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())

    @slow
    def test_model_from_pretrained(self):
        model_name = "nvidia/groupvit-gcc-yfcc"
        model = GroupViTModel.from_pretrained(model_name)
        self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@require_vision
@require_torch
class GroupViTModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model_name = "nvidia/groupvit-gcc-yfcc"
        model = GroupViTModel.from_pretrained(model_name)
        processor = CLIPProcessor.from_pretrained(model_name)

        image = prepare_img()
        inputs = processor(
            text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt"
        )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        self.assertEqual(
            outputs.logits_per_image.shape,
            torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),
        )
        self.assertEqual(
            outputs.logits_per_text.shape,
            torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
        )

        expected_logits = torch.tensor([[13.3523, 6.3629]])

        self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
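# Illustrative sketch, not part of the original test file: the integration test above
# checks raw image-text similarity logits; typical zero-shot usage converts
# `logits_per_image` to per-label probabilities with a softmax. The helper name
# `_example_zero_shot_probs` is hypothetical; the checkpoint, processor, labels, and
# image URL are the same ones the integration test uses.
def _example_zero_shot_probs():
    import requests
    import torch
    from PIL import Image
    from transformers import CLIPProcessor, GroupViTModel

    model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
    processor = CLIPProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
    image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # softmax over the text axis turns image-text logits into label probabilities
    return outputs.logits_per_image.softmax(dim=1)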
transformers/tests/models/groupvit/test_modeling_groupvit.py
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Jamba model."""

import math
import tempfile
import unittest

import pytest
from parameterized import parameterized

from transformers import AutoTokenizer, JambaConfig, is_torch_available
from transformers.testing_utils import (
    require_bitsandbytes,
    require_flash_attn,
    require_torch,
    require_torch_gpu,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        JambaForCausalLM,
        JambaForSequenceClassification,
        JambaModel,
    )
    from transformers.models.jamba.modeling_jamba import (
        HybridMambaAttentionDynamicCache,
    )


class JambaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        attn_layer_offset=1,
        attn_layer_period=8,
        num_attention_heads=4,
        num_key_value_heads=2,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attn_layer_offset = attn_layer_offset
        self.attn_layer_period = attn_layer_period
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return JambaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            attn_layer_offset=self.attn_layer_offset,
            attn_layer_period=self.attn_layer_period,
            num_attention_heads=self.num_attention_heads,
            num_key_value_heads=self.num_key_value_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=True,
            initializer_range=self.initializer_range,
            use_mamba_kernels=False,
            num_experts=2,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = JambaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        model = JambaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids, labels=token_labels)
        result = model(input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = JambaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        # Attention: Jamba needs the cache to be initialized to return a cache!
        past_key_values = HybridMambaAttentionDynamicCache(
            config, input_ids.shape[0], model.dtype, device=model.device
        )
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            past_key_values=past_key_values,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
            cache_position=torch.arange(
                input_ids.shape[1], input_ids.shape[1] + next_tokens.shape[1], device=model.device
            ),
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = JambaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class JambaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            JambaModel,
            JambaForCausalLM,
            JambaForSequenceClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (JambaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": JambaModel,
            "text-classification": JambaForSequenceClassification,
            "text-generation": JambaForCausalLM,
            "zero-shot": JambaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = JambaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=JambaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_load_balancing_loss(self):
        r"""
        Let's make sure we can actually compute the loss and do a backward on it.
        """
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.num_experts = 16
        config.output_router_logits = True
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(config.pad_token_id).to(torch_device)
        model = JambaForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask)
        bs, seqlen = input_ids.shape
        self.assertEqual(result.router_logits[0].shape, (bs * seqlen, config.num_experts))
        torch.testing.assert_close(result.aux_loss.cpu(), torch.tensor(2, dtype=torch.float32), rtol=1e-2, atol=1e-2)

        # First, we make sure that adding padding tokens doesn't change the loss
        # loss(input_ids, attention_mask=None) == loss(input_ids + padding, attention_mask=attention_mask_with_padding)
        pad_length = 1000
        # Add padding tokens to input_ids
        padding_block = config.pad_token_id * torch.ones(input_ids.shape[0], pad_length, dtype=torch.int32).to(
            torch_device
        )
        padded_input_ids = torch.cat((padding_block, input_ids), dim=1)  # this is to simulate padding to the left
        padded_attention_mask = padded_input_ids.ne(config.pad_token_id).to(torch_device)

        padded_result = model(padded_input_ids, attention_mask=padded_attention_mask)
        torch.testing.assert_close(result.aux_loss.cpu(), padded_result.aux_loss.cpu(), rtol=1e-4, atol=1e-4)

        # We make sure that the loss of including padding tokens != the loss without padding tokens
        # if attention_mask=None --> we don't exclude padding tokens
        include_padding_result = model(padded_input_ids, attention_mask=None)

        # This is to mimic torch.testing.assert_not_close
        self.assertNotAlmostEqual(include_padding_result.aux_loss.item(), result.aux_loss.item())

    def test_initialization(self):
        r"""
        Overriding the test_initialization test as the A_log and D params of the Mamba block are initialized differently
        """
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if "A_log" in name:
                        A = torch.arange(1, config.mamba_d_state + 1, dtype=torch.float32)[None, :]
                        self.assertTrue(torch.allclose(param.data, torch.log(A), atol=1e-5, rtol=1e-5))
                    elif "D" in name:
                        # check if it's a ones-like tensor
                        self.assertTrue(torch.allclose(param.data, torch.ones_like(param.data), atol=1e-5, rtol=1e-5))
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    def test_mismatched_shapes_have_properly_initialized_weights(self):
        r"""
        Overriding the test_mismatched_shapes_have_properly_initialized_weights test because A_log and D params of the
        Mamba block are initialized differently and we tested that in test_initialization
        """
        self.skipTest(reason="Cumbersome and redundant for Jamba")

    def test_attention_outputs(self):
        r"""
        Overriding the test_attention_outputs test as the Jamba model outputs attention only for its attention layers
        """
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        expected_num_attentions = math.ceil(
            (self.model_tester.num_hidden_layers - self.model_tester.attn_layer_offset)
            / self.model_tester.attn_layer_period
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
            )

    @require_flash_attn
    @require_torch_gpu
    @require_bitsandbytes
    @pytest.mark.flash_attn_test
    @slow
    def test_flash_attn_2_fp32_ln(self):
        r"""
        Overriding the test_flash_attn_2_fp32_ln test as the Jamba model, like Mixtral, doesn't support
        right padding + use cache with FA2
        """
        for model_class in self.all_generative_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)

                dummy_input = inputs_dict[model.main_input_name]
                dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input))

                # NOTE: Jamba does not support right padding + use_cache with FA2.
                dummy_attention_mask[:, -1] = 1

                model = model_class.from_pretrained(
                    tmpdirname,
                    torch_dtype=torch.float16,
                    attn_implementation="flash_attention_2",
                    low_cpu_mem_usage=True,
                    load_in_4bit=True,
                )

                for _, param in model.named_parameters():
                    # upcast only layer norms
                    if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16):
                        param.data = param.data.to(torch.float32)

                _ = model(dummy_input)
                # with attention mask
                _ = model(dummy_input, attention_mask=dummy_attention_mask)

    @require_flash_attn
    @require_torch_gpu
    @pytest.mark.flash_attn_test
    @slow
    def test_flash_attn_2_generate_padding_right(self):
        r"""
        Overriding the test_flash_attn_2_generate_padding_right test as the Jamba model, like Mixtral, doesn't support
        right padding + use cache with FA2
        """
        import torch

        for model_class in self.all_generative_model_classes:
            config, _ = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16, low_cpu_mem_usage=True).to(
                    torch_device
                )

                dummy_input = torch.LongTensor([[0, 2, 3, 4], [0, 2, 3, 4]]).to(torch_device)
                dummy_attention_mask = torch.LongTensor([[1, 1, 1, 1], [1, 1, 1, 0]]).to(torch_device)

                model.generate(dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False)

                model = model_class.from_pretrained(
                    tmpdirname,
                    torch_dtype=torch.float16,
                    attn_implementation="flash_attention_2",
                    low_cpu_mem_usage=True,
                ).to(torch_device)

                with self.assertRaises(ValueError):
                    _ = model.generate(
                        dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False
                    )

    @require_flash_attn
    @require_torch_gpu
    @pytest.mark.flash_attn_test
    @slow
    def test_flash_attn_2_generate_use_cache(self):
        r"""
        Overriding the test_flash_attn_2_generate_use_cache test as the Jamba model, like Mixtral, doesn't support
        right padding + use cache with FA2
        """
        import torch

        max_new_tokens = 30

        for model_class in self.all_generative_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

            dummy_input = inputs_dict[model_class.main_input_name]
            if dummy_input.dtype in [torch.float32, torch.bfloat16]:
                dummy_input = dummy_input.to(torch.float16)

            # make sure that all models have enough positions for generation
            if hasattr(config, "max_position_embeddings"):
                config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1

            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)

                dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input))

                # NOTE: Jamba does not support right padding + use_cache with FA2.
                dummy_attention_mask[:, -1] = 1

                model = model_class.from_pretrained(
                    tmpdirname,
                    torch_dtype=torch.float16,
                    attn_implementation="flash_attention_2",
                    low_cpu_mem_usage=True,
                ).to(torch_device)

                # Just test that a large cache works as expected
                _ = model.generate(
                    dummy_input,
                    attention_mask=dummy_attention_mask,
                    max_new_tokens=max_new_tokens,
                    do_sample=False,
                    use_cache=True,
                )

    @require_flash_attn
    @require_torch_gpu
    @pytest.mark.flash_attn_test
    @slow
    def test_flash_attn_2_inference_equivalence_right_padding(self):
        r"""
        Overriding the test_flash_attn_2_inference_padding_right test as the Jamba model, like Mixtral, doesn't support
        right padding + use cache with FA2
        """
        self.skipTest(reason="Jamba flash attention does not support right padding")

    @unittest.skip(reason="Jamba has its own special cache type")
    @parameterized.expand([(1, False), (1, True), (4, False)])
    def test_new_cache_format(self, num_beams, do_sample):
        pass


@require_torch
class JambaModelIntegrationTest(unittest.TestCase):
    model = None
    tokenizer = None
    # This variable is used to determine which CUDA device we are using for our runners (A10 or T4)
    # Depending on the hardware we get different logits / generations
    cuda_compute_capability_major_version = None

    @classmethod
    def setUpClass(cls):
        model_id = "ai21labs/Jamba-tiny-random"
        cls.model = JambaForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, low_cpu_mem_usage=True)
        cls.tokenizer = AutoTokenizer.from_pretrained(model_id)
        if is_torch_available() and torch.cuda.is_available():
            # 8 is for A100 / A10 and 7 for T4
            cls.cuda_compute_capability_major_version = torch.cuda.get_device_capability()[0]

    @slow
    def test_simple_generate(self):
        # Key 9 for MI300, Key 8 for A100/A10, and Key 7 for T4.
        #
        # Note: Key 9 is currently set for MI300, but may need potential future adjustments for H100s,
        # considering differences in hardware processing and potential deviations in generated text.
        EXPECTED_TEXTS = {
            7: "<|startoftext|>Hey how are you doing on this lovely evening? Canyon rins hugaughter glamour Rutgers Singh<|reserved_797|>cw algunas",
            8: "<|startoftext|>Hey how are you doing on this lovely evening? Canyon rins hugaughter glamour Rutgers Singh Hebrew llam bb",
            9: "<|startoftext|>Hey how are you doing on this lovely evening? Canyon rins hugaughter glamour Rutgers Singh Hebrew llam bb",
        }

        self.model.to(torch_device)

        input_ids = self.tokenizer("Hey how are you doing on this lovely evening?", return_tensors="pt")[
            "input_ids"
        ].to(torch_device)
        out = self.model.generate(input_ids, do_sample=False, max_new_tokens=10)
        output_sentence = self.tokenizer.decode(out[0, :])
        self.assertEqual(output_sentence, EXPECTED_TEXTS[self.cuda_compute_capability_major_version])

        # TODO: there are significant differences in the logits across major cuda versions, which shouldn't exist
        if self.cuda_compute_capability_major_version == 8:
            with torch.no_grad():
                logits = self.model(input_ids=input_ids).logits

            EXPECTED_LOGITS_NO_GRAD = torch.tensor(
                [
                    0.0134, -0.2197, 0.0396, -0.1011, 0.0459, 0.2793, -0.1465, 0.1660,
                    -0.2930, -0.0278, 0.0269, -0.5586, -0.2109, -0.1426, -0.1553, 0.1279,
                    0.0713, 0.2246, 0.1660, -0.2314, -0.1187, -0.1162, -0.1377, 0.0292,
                    0.1245, 0.2275, 0.0374, 0.1089, -0.1348, -0.2305, 0.1484, -0.3906,
                    0.1709, -0.4590, -0.0447, 0.2422, 0.1592, -0.1855, 0.2441, -0.0562
                ],
                dtype=torch.float32,
            )  # fmt: skip

            torch.testing.assert_close(logits[0, -1, :40].cpu(), EXPECTED_LOGITS_NO_GRAD, rtol=1e-3, atol=1e-3)

    @slow
    def test_simple_batched_generate_with_padding(self):
        # Key 9 for MI300, Key 8 for A100/A10, and Key 7 for T4.
        #
        # Note: Key 9 is currently set for MI300, but may need potential future adjustments for H100s,
        # considering differences in hardware processing and potential deviations in generated text.
        EXPECTED_TEXTS = {
            7: [
                "<|startoftext|>Hey how are you doing on this lovely evening? Canyon rins hugaughter glamour Rutgers Singh Hebrew cases Cats",
                "<|pad|><|pad|><|pad|><|pad|><|pad|><|pad|><|startoftext|>Tell me a storyptus Nets Madison El chamadamodern updximVaparsed",
            ],
            8: [
                "<|startoftext|>Hey how are you doing on this lovely evening? Canyon rins hugaughter glamour Rutgers Singh<|reserved_797|>cw algunas",
                "<|pad|><|pad|><|pad|><|pad|><|pad|><|pad|><|startoftext|>Tell me a storyptus Nets Madison El chamadamodern updximVaparsed",
            ],
            9: [
                "<|startoftext|>Hey how are you doing on this lovely evening? Canyon rins hugaughter glamour Rutgers Singh<|reserved_797|>cw algunas",
                "<|pad|><|pad|><|pad|><|pad|><|pad|><|pad|><|startoftext|>Tell me a storyptus Nets Madison El chamadamodern updximVaparsed",
            ],
        }

        self.model.to(torch_device)

        inputs = self.tokenizer(
            ["Hey how are you doing on this lovely evening?", "Tell me a story"], padding=True, return_tensors="pt"
        ).to(torch_device)
        out = self.model.generate(**inputs, do_sample=False, max_new_tokens=10)
        output_sentences = self.tokenizer.batch_decode(out)
        self.assertEqual(output_sentences[0], EXPECTED_TEXTS[self.cuda_compute_capability_major_version][0])
        self.assertEqual(output_sentences[1], EXPECTED_TEXTS[self.cuda_compute_capability_major_version][1])

        # TODO: there are significant differences in the logits across major cuda versions, which shouldn't exist
        if self.cuda_compute_capability_major_version == 8:
            with torch.no_grad():
                logits = self.model(input_ids=inputs["input_ids"]).logits

            # TODO fix logits
            EXPECTED_LOGITS_NO_GRAD_0 = torch.tensor(
                [
                    0.0166, -0.2227, 0.0396, -0.1035, 0.0459, 0.2754, -0.1445, 0.1641,
                    -0.2910, -0.0273, 0.0227, -0.5547, -0.2139, -0.1396, -0.1582, 0.1289,
                    0.0713, 0.2256, 0.1699, -0.2295, -0.1182, -0.1167, -0.1387, 0.0261,
                    0.1270, 0.2285, 0.0403, 0.1108, -0.1318, -0.2334, 0.1455, -0.3945,
                    0.1729, -0.4609, -0.0410, 0.2412, 0.1572, -0.1895, 0.2402, -0.0583
                ],
                dtype=torch.float32,
            )  # fmt: skip

            EXPECTED_LOGITS_NO_GRAD_1 = torch.tensor(
                [
                    -0.1318, 0.2354, -0.4160, -0.0325, -0.0461, 0.0342, 0.2578, 0.0874,
                    0.1484, 0.2266, -0.1182, -0.1396, -0.1494, -0.1089, -0.0019, -0.2852,
                    0.1973, -0.2676, 0.0586, -0.1992, -0.2520, -0.1147, -0.1973, 0.2129,
                    0.0520, 0.1699, 0.1816, 0.1289, 0.1699, -0.1216, -0.2656, -0.2891,
                    0.2363, 0.2656, 0.0488, -0.1875, 0.2148, -0.1250, 0.1816, 0.0077
                ],
                dtype=torch.float32,
            )  # fmt: skip

            torch.testing.assert_close(logits[0, -1, :40].cpu(), EXPECTED_LOGITS_NO_GRAD_0, rtol=1e-3, atol=1e-3)
            torch.testing.assert_close(logits[1, -1, :40].cpu(), EXPECTED_LOGITS_NO_GRAD_1, rtol=1e-3, atol=1e-3)
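# Illustrative sketch, not part of the original test file: when bypassing `generate()`,
# Jamba's hybrid cache must be constructed explicitly before the first forward pass,
# exactly as `create_and_check_decoder_model_past_large_inputs` does above. The helper
# name `_example_prime_jamba_cache` is hypothetical; the calls mirror that test.
def _example_prime_jamba_cache(model, input_ids, attention_mask):
    past_key_values = HybridMambaAttentionDynamicCache(
        model.config, input_ids.shape[0], model.dtype, device=model.device
    )
    outputs = model(
        input_ids,
        attention_mask=attention_mask,
        past_key_values=past_key_values,
        use_cache=True,
    )
    # the returned cache is now primed and can be reused for subsequent decoding steps
    return outputs.past_key_values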
transformers/tests/models/jamba/test_modeling_jamba.py
# coding=utf-8
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        LiltForQuestionAnswering,
        LiltForSequenceClassification,
        LiltForTokenClassification,
        LiltModel,
    )


class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    # TODO: Fix the failed tests
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @unittest.skip(
        reason="This architecture does not seem to compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture does not seem to compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture does not seem to compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model_name = "SCUT-DLVCLab/lilt-roberta-en-base"
        model = LiltModel.from_pretrained(model_name)
        self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
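# Illustrative sketch, not part of the original test file: a vectorized equivalent of the
# bbox legality fix-up in `LiltModelTester.prepare_config_and_inputs`, which enforces
# x0 <= x1 and y0 <= y1 on per-token `[x0, y0, x1, y1]` boxes. The coordinate ordering is
# assumed from the index pairs the tester swaps, and `_example_legal_bbox` is a hypothetical name.
def _example_legal_bbox(batch_size=13, seq_length=7, range_bbox=1000):
    import torch

    bbox = torch.randint(0, range_bbox, (batch_size, seq_length, 4))
    # sort each coordinate pair so the second corner is never above/left of the first
    x0 = torch.minimum(bbox[..., 0], bbox[..., 2])
    y0 = torch.minimum(bbox[..., 1], bbox[..., 3])
    x1 = torch.maximum(bbox[..., 0], bbox[..., 2])
    y1 = torch.maximum(bbox[..., 1], bbox[..., 3])
    return torch.stack([x0, y0, x1, y1], dim=-1)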
transformers/tests/models/lilt/test_modeling_lilt.py
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import LongformerConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        LongformerForMaskedLM,
        LongformerForMultipleChoice,
        LongformerForQuestionAnswering,
        LongformerForSequenceClassification,
        LongformerForTokenClassification,
        LongformerModel,
        LongformerSelfAttention,
    )


class LongformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but LongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window + 1` locations
        # (assuming no token with global attention, otherwise the last dimension of attentions
        # is x + self.attention_window + 1, where x is the number of tokens with global attention)
        self.key_length = self.attention_window + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LongformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            attention_window=self.attention_window,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def create_and_check_attention_mask_determinism(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LongformerModel(config=config)
        model.to(torch_device)
        model.eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        output_with_mask = model(input_ids, attention_mask=attention_mask)["last_hidden_state"]
        output_without_mask = model(input_ids)["last_hidden_state"]
        self.parent.assertTrue(torch.allclose(output_with_mask[0, 0, :5], output_without_mask[0, 0, :5], atol=1e-4))

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LongformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_with_global_attention_mask(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LongformerModel(config=config)
        model.to(torch_device)
        model.eval()
        global_attention_mask = input_mask.clone()
        global_attention_mask[:, input_mask.shape[-1] // 2] = 0
        global_attention_mask = global_attention_mask.to(torch_device)

        result = model(
            input_ids,
            attention_mask=input_mask,
            global_attention_mask=global_attention_mask,
            token_type_ids=token_type_ids,
        )
        result = model(input_ids, token_type_ids=token_type_ids, global_attention_mask=global_attention_mask)
        result = model(input_ids, global_attention_mask=global_attention_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LongformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LongformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            global_attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = LongformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = LongformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = LongformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            global_attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        # only the last token gets global attention
        global_attention_mask = torch.zeros_like(input_ids)
        global_attention_mask[:, -1] = 1
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
            "global_attention_mask": global_attention_mask,
        }
        return config, inputs_dict

    def prepare_config_and_inputs_for_question_answering(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        # Replace sep_token_id by some random id
        input_ids[input_ids == config.sep_token_id] = torch.randint(0, config.vocab_size, (1,)).item()
        # Make sure there are exactly three sep_token_id
        input_ids[:, -3:] = config.sep_token_id
        input_mask = torch.ones_like(input_ids)

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels


@require_torch
class LongformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False  # pruning is not supported
    test_torchscript = False

    all_model_classes = (
        (
            LongformerModel,
            LongformerForMaskedLM,
            LongformerForSequenceClassification,
            LongformerForQuestionAnswering,
            LongformerForTokenClassification,
            LongformerForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LongformerModel,
            "fill-mask": LongformerForMaskedLM,
            "question-answering": LongformerForQuestionAnswering,
            "text-classification": LongformerForSequenceClassification,
            "token-classification": LongformerForTokenClassification,
            "zero-shot": LongformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    # Need to use `0.6` instead of `0.5` for `test_disk_offload`
    model_split_percents = [0.6, 0.7, 0.9]

    # TODO: Fix the failed tests
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def setUp(self):
        self.model_tester = LongformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LongformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_attention_mask_determinism(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_attention_mask_determinism(*config_and_inputs)

    def test_model_global_attention_mask(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_with_global_attention_mask(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_question_answering()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    @unittest.skip(reason="Longformer cannot keep gradients in attention or hidden states")
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip(reason="LongFormer calculates global attn only when attn_mask has non-zero elements")
    def test_batching_equivalence(self):
        return


@require_torch
@require_sentencepiece
@require_tokenizers
class LongformerModelIntegrationTest(unittest.TestCase):
    def _get_hidden_states(self):
        return torch.tensor(
            [
                [
                    [4.98332758e-01, 2.69175139e00, -7.08081422e-03, 1.04915401e00, -1.83476661e00, 7.67220476e-01, 2.98580543e-01, 2.84803992e-02],
                    [-7.58357372e-01, 4.20635998e-01, -4.04739919e-02, 1.59924145e-01, 2.05135748e00, -1.15997978e00, 5.37166397e-01, 2.62873606e-01],
                    [-1.69438001e00, 4.17574660e-01, -1.49196962e00, -1.76483717e00, -1.94566312e-01, -1.71183858e00, 7.72903565e-01, -1.11557056e00],
                    [5.44028163e-01, 2.05466114e-01, -3.63045868e-01, 2.41865062e-01, 3.20348382e-01, -9.05611176e-01, -1.92690727e-01, -1.19917547e00],
                ]
            ],
            dtype=torch.float32,
            device=torch_device,
        )  # fmt: skip

    def test_diagonalize(self):
        hidden_states = self._get_hidden_states()
        hidden_states = hidden_states.reshape((1, 8, 4))  # set seq length = 8, hidden dim = 4
        chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2)
        window_overlap_size = chunked_hidden_states.shape[2]
        self.assertTrue(window_overlap_size == 4)

        padded_hidden_states = LongformerSelfAttention._pad_and_diagonalize(chunked_hidden_states)

        self.assertTrue(padded_hidden_states.shape[-1] == chunked_hidden_states.shape[-1] + window_overlap_size - 1)

        # first row => [0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000]
        self.assertTrue(torch.allclose(padded_hidden_states[0, 0, 0, :4], chunked_hidden_states[0, 0, 0], atol=1e-3))
        self.assertTrue(
            torch.allclose(
                padded_hidden_states[0, 0, 0, 4:],
                torch.zeros((3,), device=torch_device, dtype=torch.float32),
                atol=1e-3,
            )
        )
        # last row => [0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629]
        self.assertTrue(torch.allclose(padded_hidden_states[0, 0, -1, 3:], chunked_hidden_states[0, 0, -1], atol=1e-3))
        self.assertTrue(
            torch.allclose(
                padded_hidden_states[0, 0, -1, :3],
                torch.zeros((3,), device=torch_device, dtype=torch.float32),
                atol=1e-3,
            )
        )

    def test_pad_and_transpose_last_two_dims(self):
        hidden_states = self._get_hidden_states()
        self.assertEqual(hidden_states.shape, (1, 4, 8))
        padding = (0, 0, 0, 1)

        padded_hidden_states = LongformerSelfAttention._pad_and_transpose_last_two_dims(hidden_states, padding)
        self.assertEqual(padded_hidden_states.shape, (1, 8, 5))

        expected_added_dim = torch.zeros((5,), device=torch_device, dtype=torch.float32)
        self.assertTrue(torch.allclose(expected_added_dim, padded_hidden_states[0, -1, :], atol=1e-6))
        self.assertTrue(torch.allclose(hidden_states[0, -1, :], padded_hidden_states.view(1, -1)[0, 24:32], atol=1e-6))

    def test_chunk(self):
        hidden_states = self._get_hidden_states()
        batch_size = 1
        seq_length = 8
        hidden_size = 4
        hidden_states = hidden_states.reshape((batch_size, seq_length, hidden_size))

        chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2)

        # expected slices across chunk and seq length dim
        expected_slice_along_seq_length = torch.tensor(
            [0.4983, -0.7584, -1.6944], device=torch_device, dtype=torch.float32
        )
        expected_slice_along_chunk = torch.tensor(
            [0.4983, -1.8348, -0.7584, 2.0514], device=torch_device, dtype=torch.float32
        )

        self.assertTrue(torch.allclose(chunked_hidden_states[0, :, 0, 0], expected_slice_along_seq_length, atol=1e-3))
        self.assertTrue(torch.allclose(chunked_hidden_states[0, 0, :, 0], expected_slice_along_chunk, atol=1e-3))
        self.assertEqual(chunked_hidden_states.shape, (1, 3, 4, 4))

    def test_mask_invalid_locations(self):
        hidden_states = self._get_hidden_states()

        batch_size = 1
        seq_length = 8
        hidden_size = 4
        hidden_states = hidden_states.reshape((batch_size, seq_length, hidden_size))
        chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2)

        hid_states_1 = chunked_hidden_states.clone()
        LongformerSelfAttention._mask_invalid_locations(hid_states_1, 1)
        self.assertTrue(torch.isinf(hid_states_1).sum().item() == 8)

        hid_states_2 = chunked_hidden_states.clone()
        LongformerSelfAttention._mask_invalid_locations(hid_states_2, 2)
        self.assertTrue(torch.isinf(hid_states_2).sum().item() == 24)

        hid_states_3 = chunked_hidden_states.clone()[:, :, :, :3]
        LongformerSelfAttention._mask_invalid_locations(hid_states_3, 2)
        self.assertTrue(torch.isinf(hid_states_3).sum().item() == 24)

        hid_states_4 = chunked_hidden_states.clone()[:, :, 2:, :]
        LongformerSelfAttention._mask_invalid_locations(hid_states_4, 2)
        self.assertTrue(torch.isinf(hid_states_4).sum().item() == 12)

    def test_layer_local_attn(self):
        model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny")
        model.eval()
        layer = model.encoder.layer[0].attention.self.to(torch_device)
        hidden_states = self._get_hidden_states()
        batch_size, seq_length, hidden_size = hidden_states.size()
        attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device)
        attention_mask[:, -2:] = -10000

        is_index_masked = attention_mask < 0
        is_index_global_attn = attention_mask > 0
        is_global_attn = is_index_global_attn.flatten().any().item()

        output_hidden_states = layer(
            hidden_states,
            attention_mask=attention_mask,
            is_index_masked=is_index_masked,
            is_index_global_attn=is_index_global_attn,
            is_global_attn=is_global_attn,
        )[0]

        self.assertEqual(output_hidden_states.shape, (1, 4, 8))
        self.assertTrue(
            torch.allclose(
                output_hidden_states[0, 1],
                torch.tensor(
                    [0.0019, 0.0122, -0.0171, -0.0256, -0.0300, 0.0173, -0.0115, 0.0048],
                    dtype=torch.float32,
                    device=torch_device,
                ),
                atol=1e-3,
            )
        )

    def test_layer_global_attn(self):
        model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny")
        model.eval()
        layer = model.encoder.layer[0].attention.self.to(torch_device)
        hidden_states = torch.cat([self._get_hidden_states(), self._get_hidden_states() - 0.5], dim=0)
        batch_size, seq_length, hidden_size = hidden_states.size()
        attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device)

        # create attn mask
        attention_mask[0, -2:] = 10000.0
        attention_mask[0, -1:] = -10000.0
        attention_mask[1, 1:] = 10000.0

        is_index_masked = attention_mask < 0
        is_index_global_attn = attention_mask > 0
        is_global_attn = is_index_global_attn.flatten().any().item()

        output_hidden_states = layer(
            hidden_states,
            attention_mask=attention_mask,
            is_index_masked=is_index_masked,
            is_index_global_attn=is_index_global_attn,
            is_global_attn=is_global_attn,
        )[0]

        self.assertEqual(output_hidden_states.shape, (2, 4, 8))

        self.assertTrue(
            torch.allclose(
                output_hidden_states[0, 2],
                torch.tensor(
                    [-0.0651, -0.0393, 0.0309, -0.0342, -0.0066, -0.0155, -0.0209, -0.0494],
                    dtype=torch.float32,
                    device=torch_device,
                ),
                atol=1e-3,
            )
        )

        self.assertTrue(
            torch.allclose(
                output_hidden_states[1, -2],
                torch.tensor(
                    [-0.0405, -0.0384, 0.0396, -0.0374, -0.0341, 0.0136, 0.0014, -0.0571],
                    dtype=torch.float32,
                    device=torch_device,
                ),
                atol=1e-3,
            )
        )

    def test_layer_attn_probs(self):
        model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny")
        model.eval()
        layer = model.encoder.layer[0].attention.self.to(torch_device)
        hidden_states = torch.cat([self._get_hidden_states(), self._get_hidden_states() - 0.5], dim=0)
        batch_size, seq_length, hidden_size = hidden_states.size()
        attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device)

        # create attn mask
        attention_mask[0, -2:] = 10000.0
        attention_mask[0, -1:] = -10000.0
        attention_mask[1, 1:] = 10000.0

        is_index_masked = attention_mask < 0
        is_index_global_attn = attention_mask > 0
        is_global_attn = is_index_global_attn.flatten().any().item()

        output_hidden_states, local_attentions, global_attentions = layer(
            hidden_states,
            attention_mask=attention_mask,
            is_index_masked=is_index_masked,
            is_index_global_attn=is_index_global_attn,
            is_global_attn=is_global_attn,
            output_attentions=True,
        )

        self.assertEqual(local_attentions.shape, (2, 4, 2, 8))
        self.assertEqual(global_attentions.shape, (2, 2, 3, 4))

        # All tokens with global attention have weight 0 in local attentions.
        self.assertTrue(torch.all(local_attentions[0, 2:4, :, :] == 0))
        self.assertTrue(torch.all(local_attentions[1, 1:4, :, :] == 0))

        # The weight of all tokens with local attention must sum to 1.
        self.assertTrue(torch.all(torch.abs(global_attentions[0, :, :2, :].sum(dim=-1) - 1) < 1e-6))
        self.assertTrue(torch.all(torch.abs(global_attentions[1, :, :1, :].sum(dim=-1) - 1) < 1e-6))

        self.assertTrue(
            torch.allclose(
                local_attentions[0, 0, 0, :],
                torch.tensor(
                    [0.3328, 0.0000, 0.0000, 0.0000, 0.0000, 0.3355, 0.3318, 0.0000],
                    dtype=torch.float32,
                    device=torch_device,
                ),
                atol=1e-3,
            )
        )

        self.assertTrue(
            torch.allclose(
                local_attentions[1, 0, 0, :],
                torch.tensor(
                    [0.2492, 0.2502, 0.2502, 0.0000, 0.0000, 0.2505, 0.0000, 0.0000],
                    dtype=torch.float32,
                    device=torch_device,
                ),
                atol=1e-3,
            )
        )

        # All the global attention weights must sum to 1.
        self.assertTrue(torch.all(torch.abs(global_attentions.sum(dim=-1) - 1) < 1e-6))

        self.assertTrue(
            torch.allclose(
                global_attentions[0, 0, 1, :],
                torch.tensor(
                    [0.2500, 0.2500, 0.2500, 0.2500],
                    dtype=torch.float32,
                    device=torch_device,
                ),
                atol=1e-3,
            )
        )

        self.assertTrue(
            torch.allclose(
                global_attentions[1, 0, 0, :],
                torch.tensor(
                    [0.2497, 0.2500, 0.2499, 0.2504],
                    dtype=torch.float32,
                    device=torch_device,
                ),
                atol=1e-3,
            )
        )

    @slow
    def test_inference_no_head(self):
        model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
        model.to(torch_device)

        # 'Hello world!'
        input_ids = torch.tensor([[0, 20920, 232, 328, 1437, 2]], dtype=torch.long, device=torch_device)
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        output = model(input_ids, attention_mask=attention_mask)[0]
        output_without_mask = model(input_ids)[0]

        expected_output_slice = torch.tensor([0.0549, 0.1087, -0.1119, -0.0368, 0.0250], device=torch_device)
        self.assertTrue(torch.allclose(output[0, 0, -5:], expected_output_slice, atol=1e-4))
        self.assertTrue(torch.allclose(output_without_mask[0, 0, -5:], expected_output_slice, atol=1e-4))

    @slow
    def test_inference_no_head_long(self):
        model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
        model.to(torch_device)

        # 'Hello world!
' repeated 1000 times input_ids = torch.tensor( [[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=torch.long, device=torch_device ) # long input attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=input_ids.device) global_attention_mask = torch.zeros(input_ids.shape, dtype=torch.long, device=input_ids.device) global_attention_mask[:, [1, 4, 21]] = 1 # Set global attention on a few random positions output = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask)[0] expected_output_sum = torch.tensor(74585.8594, device=torch_device) expected_output_mean = torch.tensor(0.0243, device=torch_device) self.assertTrue(torch.allclose(output.sum(), expected_output_sum, atol=1e-4)) self.assertTrue(torch.allclose(output.mean(), expected_output_mean, atol=1e-4)) @slow def test_inference_masked_lm_long(self): model = LongformerForMaskedLM.from_pretrained("allenai/longformer-base-4096") model.to(torch_device) # 'Hello world! ' repeated 1000 times input_ids = torch.tensor( [[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=torch.long, device=torch_device ) # long input input_ids = input_ids.to(torch_device) loss, prediction_scores = model(input_ids, labels=input_ids).to_tuple() expected_loss = torch.tensor(0.0074, device=torch_device) expected_prediction_scores_sum = torch.tensor(-6.1048e08, device=torch_device) expected_prediction_scores_mean = torch.tensor(-3.0348, device=torch_device) self.assertTrue(torch.allclose(loss, expected_loss, atol=1e-4)) self.assertTrue(torch.allclose(prediction_scores.sum(), expected_prediction_scores_sum, atol=1e-4)) self.assertTrue(torch.allclose(prediction_scores.mean(), expected_prediction_scores_mean, atol=1e-4))
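# Usage sketch (illustrative, hedged): the integration tests above build Longformer's
# `global_attention_mask` by hand; this helper shows the same convention as a caller
# would use it. The checkpoint name and the example sentence are assumptions made
# only for illustration and are not part of the test suite.
def _global_attention_usage_sketch():
    """Minimal, hedged sketch of the global-attention convention exercised above."""
    import torch

    from transformers import LongformerModel, LongformerTokenizer

    tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
    model = LongformerModel.from_pretrained("allenai/longformer-base-4096")

    inputs = tokenizer("Hello world!", return_tensors="pt")
    # 0 = local sliding-window attention, 1 = global attention. Giving the <s>
    # token global attention mirrors the usual classification-style setup.
    global_attention_mask = torch.zeros_like(inputs["input_ids"])
    global_attention_mask[:, 0] = 1

    outputs = model(**inputs, global_attention_mask=global_attention_mask)
    return outputs.last_hidden_state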
transformers/tests/models/longformer/test_modeling_longformer.py/0
{ "file_path": "transformers/tests/models/longformer/test_modeling_longformer.py", "repo_id": "transformers", "token_count": 15468 }
457
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Mask2Former model.""" import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import Mask2FormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import ( require_timm, require_torch, require_torch_accelerator, require_torch_fp16, require_torch_multi_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel if is_vision_available(): from transformers import Mask2FormerImageProcessor if is_vision_available(): from PIL import Image class Mask2FormerModelTester: def __init__( self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64, num_attention_heads=4, num_hidden_layers=2, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_auxiliary_loss = use_auxiliary_loss self.num_queries = num_queries self.num_channels = num_channels self.min_size = min_size self.max_size = max_size self.num_labels = num_labels self.hidden_dim = hidden_dim self.mask_feature_size = hidden_dim self.num_attention_heads = num_attention_heads self.num_hidden_layers = num_hidden_layers def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to( torch_device ) pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device) mask_labels = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5 ).float() class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long() config = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def get_config(self): config = Mask2FormerConfig( hidden_size=self.hidden_dim, num_attention_heads=self.num_attention_heads, num_hidden_layers=self.num_hidden_layers, encoder_feedforward_dim=16, dim_feedforward=32, num_queries=self.num_queries, num_labels=self.num_labels, decoder_layers=2, encoder_layers=2, feature_size=16, ) config.num_queries = self.num_queries config.num_labels = self.num_labels config.backbone_config.embed_dim = 16 config.backbone_config.depths = [1, 1, 1, 1] config.backbone_config.hidden_size = 16 config.backbone_config.num_channels = self.num_channels config.backbone_config.num_heads = [1, 1, 2, 2] config.backbone = None config.hidden_dim = self.hidden_dim config.mask_feature_size = self.hidden_dim config.feature_size = self.hidden_dim return config def 
prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def check_output_hidden_state(self, output, config): encoder_hidden_states = output.encoder_hidden_states pixel_decoder_hidden_states = output.pixel_decoder_hidden_states transformer_decoder_hidden_states = output.transformer_decoder_hidden_states self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths)) self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths)) self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers) def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False): with torch.no_grad(): model = Mask2FormerModel(config=config) model.to(torch_device) model.eval() output = model(pixel_values=pixel_values, pixel_mask=pixel_mask) output = model(pixel_values, output_hidden_states=True) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(output.encoder_last_hidden_state is not None) if output_hidden_states: self.check_output_hidden_state(output, config) def create_and_check_mask2former_instance_segmentation_head_model( self, config, pixel_values, pixel_mask, mask_labels, class_labels ): model = Mask2FormerForUniversalSegmentation(config=config) model.to(torch_device) model.eval() def comm_check_on_output(result): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(result.encoder_last_hidden_state is not None) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) comm_check_on_output(result) result = model( pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels ) comm_check_on_output(result) self.parent.assertTrue(result.loss is not None) self.parent.assertEqual(result.loss.shape, torch.Size([])) @require_torch class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else () pipeline_model_mapping = {"image-feature-extraction": Mask2FormerModel} if is_torch_available() else {} is_encoder_decoder = False test_pruning = False test_head_masking = False test_missing_keys = False def setUp(self): self.model_tester = Mask2FormerModelTester(self) self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_mask2former_model(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() 
self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=False) def test_mask2former_instance_segmentation_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs) @unittest.skip(reason="Mask2Former does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="Mask2Former is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="Mask2Former does not use token embeddings") def test_resize_tokens_embeddings(self): pass @require_torch_multi_gpu @unittest.skip( reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def test_multi_gpu_data_parallel_forward(self): pass @slow def test_model_from_pretrained(self): for model_name in ["facebook/mask2former-swin-small-coco-instance"]: model = Mask2FormerModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_model_with_labels(self): size = (self.model_tester.min_size,) * 2 inputs = { "pixel_values": torch.randn((2, 3, *size), device=torch_device), "mask_labels": torch.randn((2, 10, *size), device=torch_device), "class_labels": torch.zeros(2, 10, device=torch_device).long(), } config = self.model_tester.get_config() model = Mask2FormerForUniversalSegmentation(config).to(torch_device) outputs = model(**inputs) self.assertTrue(outputs.loss is not None) def test_hidden_states_output(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=True) def test_attention_outputs(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device) outputs = model(**inputs, output_attentions=True) self.assertTrue(outputs.attentions is not None) def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") model_class = self.all_model_classes[1] config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs() model = model_class(config) model.to(torch_device) model.train() loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss loss.backward() def test_retain_grad_hidden_states_attentions(self): model_class = self.all_model_classes[1] config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs() config.output_hidden_states = True config.output_attentions = True model = model_class(config).to(torch_device) model.train() outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels) encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() attentions = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(pixel_decoder_hidden_states.grad) 
self.assertIsNotNone(transformer_decoder_hidden_states.grad) self.assertIsNotNone(attentions.grad) @require_timm def test_backbone_selection(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() config.backbone_config = None config.backbone_kwargs = {"out_indices": [1, 2, 3]} config.use_pretrained_backbone = True # Load a timm backbone # We can't load transformer checkpoint with timm backbone, as we can't specify features_only and out_indices config.backbone = "resnet18" config.use_timm_backbone = True for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() if model.__class__.__name__ == "Mask2FormerModel": self.assertEqual(model.pixel_level_module.encoder.out_indices, [1, 2, 3]) elif model.__class__.__name__ == "Mask2FormerForUniversalSegmentation": self.assertEqual(model.model.pixel_level_module.encoder.out_indices, [1, 2, 3]) # Load a HF backbone config.backbone = "microsoft/resnet-18" config.use_timm_backbone = False for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() if model.__class__.__name__ == "Mask2FormerModel": self.assertEqual(model.pixel_level_module.encoder.out_indices, [1, 2, 3]) elif model.__class__.__name__ == "Mask2FormerForUniversalSegmentation": self.assertEqual(model.model.pixel_level_module.encoder.out_indices, [1, 2, 3]) TOLERANCE = 1e-4 # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_vision @slow class Mask2FormerModelIntegrationTest(unittest.TestCase): @cached_property def model_checkpoints(self): return "facebook/mask2former-swin-small-coco-instance" @cached_property def default_image_processor(self): return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None def test_inference_no_head(self): model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(image, return_tensors="pt").to(torch_device) inputs_shape = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(inputs_shape, (1, 3, 384, 384)) with torch.no_grad(): outputs = model(**inputs) expected_slice_hidden_state = torch.tensor( [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(torch_device) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE ) ) expected_slice_hidden_state = torch.tensor( [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(torch_device) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE ) ) expected_slice_hidden_state = torch.tensor( [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(torch_device) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE ) ) def test_inference_universal_segmentation_head(self): model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval() image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(image, return_tensors="pt").to(torch_device) 
inputs_shape = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(inputs_shape, (1, 3, 384, 384)) with torch.no_grad(): outputs = model(**inputs) # masks_queries_logits masks_queries_logits = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) expected_slice = [ [-8.7839, -9.0056, -8.8121], [-7.4104, -7.0313, -6.5401], [-6.6105, -6.3427, -6.4675], ] expected_slice = torch.tensor(expected_slice).to(torch_device) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE)) # class_queries_logits class_queries_logits = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1)) expected_slice = torch.tensor( [ [1.8324, -8.0835, -4.1922], [0.8450, -9.0050, -3.6053], [0.3045, -7.7293, -3.0275], ] ).to(torch_device) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE)) @require_torch_accelerator @require_torch_fp16 def test_inference_fp16(self): model = ( Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints) .to(torch_device, dtype=torch.float16) .eval() ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(image, return_tensors="pt").to(torch_device, dtype=torch.float16) with torch.no_grad(): _ = model(**inputs) def test_with_segmentation_maps_and_loss(self): model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval() image_processor = self.default_image_processor inputs = image_processor( [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors="pt", ) inputs["pixel_values"] = inputs["pixel_values"].to(torch_device) inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]] inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]] with torch.no_grad(): outputs = model(**inputs) self.assertTrue(outputs.loss is not None)
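# Usage sketch (illustrative, hedged): the inference + post-processing flow that the
# integration tests above verify piece by piece. The checkpoint and the COCO fixture
# path (the same sample `prepare_img` loads) are assumptions for illustration only.
def _instance_segmentation_usage_sketch():
    """Hedged sketch of Mask2Former inference followed by instance post-processing."""
    import torch
    from PIL import Image

    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor

    checkpoint = "facebook/mask2former-swin-small-coco-instance"
    image_processor = Mask2FormerImageProcessor.from_pretrained(checkpoint)
    model = Mask2FormerForUniversalSegmentation.from_pretrained(checkpoint).eval()

    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = image_processor(image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    # Rescale the low-resolution query logits back to the original image size and
    # merge them into a per-pixel instance map plus per-segment metadata.
    result = image_processor.post_process_instance_segmentation(
        outputs, target_sizes=[image.size[::-1]]
    )[0]
    return result["segmentation"], result["segments_info"]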
transformers/tests/models/mask2former/test_modeling_mask2former.py/0
{ "file_path": "transformers/tests/models/mask2former/test_modeling_mask2former.py", "repo_id": "transformers", "token_count": 8457 }
458
# coding=utf-8 # Copyright 2021, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch MVP model.""" import copy import tempfile import unittest import timeout_decorator # noqa from transformers import MvpConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, require_torch_fp16, slow, torch_device, ) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpTokenizer, ) from transformers.models.mvp.modeling_mvp import MvpDecoder, MvpEncoder, shift_tokens_right def prepare_mvp_inputs_dict( config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class MvpModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def 
prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp( 3, ) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() inputs_dict = prepare_mvp_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def get_config(self): return MvpConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, ) def get_pipeline_config(self): config = self.get_config() config.max_position_embeddings = 100 config.vocab_size = 300 return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = MvpModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] head_mask = inputs_dict["head_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = MvpModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = MvpEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = 
model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = MvpDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class MvpHeadTests(unittest.TestCase): vocab_size = 99 def _get_config_and_data(self): input_ids = torch.tensor( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ], dtype=torch.long, device=torch_device, ) batch_size = input_ids.shape[0] config = MvpConfig( vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) return config, input_ids, batch_size def test_sequence_classification_forward(self): config, input_ids, batch_size = self._get_config_and_data() labels = _long_tensor([2] * batch_size).to(torch_device) config.num_labels = 3 model = MvpForSequenceClassification(config) model.to(torch_device) outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=labels) expected_shape = torch.Size((batch_size, config.num_labels)) self.assertEqual(outputs["logits"].shape, expected_shape) self.assertIsInstance(outputs["loss"].item(), float) def test_question_answering_forward(self): config, input_ids, batch_size = self._get_config_and_data() sequence_labels = ids_tensor([batch_size], 2).to(torch_device) model = MvpForQuestionAnswering(config) model.to(torch_device) outputs = model( input_ids=input_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.assertEqual(outputs["start_logits"].shape, input_ids.shape) self.assertEqual(outputs["end_logits"].shape, input_ids.shape) self.assertIsInstance(outputs["loss"].item(), float) @timeout_decorator.timeout(1) def test_lm_forward(self): config, input_ids, batch_size = self._get_config_and_data() lm_labels = ids_tensor([batch_size, input_ids.shape[1]], self.vocab_size).to(torch_device) lm_model = MvpForConditionalGeneration(config) lm_model.to(torch_device) outputs = lm_model(input_ids=input_ids, labels=lm_labels) expected_shape = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) self.assertIsInstance(outputs["loss"].item(), float) def test_lm_uneven_forward(self): config = MvpConfig( vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, ) lm_model = MvpForConditionalGeneration(config).to(torch_device) context = torch.tensor( [[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], device=torch_device, dtype=torch.long ) summary = torch.tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], device=torch_device, dtype=torch.long) outputs = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary) expected_shape = (*summary.shape, 
config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) def test_generate_beam_search(self): input_ids = torch.tensor([[71, 82, 2], [68, 34, 2]], device=torch_device, dtype=torch.long) config = MvpConfig( vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) lm_model = MvpForConditionalGeneration(config).to(torch_device) lm_model.eval() max_length = 5 generated_ids = lm_model.generate( input_ids.clone(), do_sample=True, num_return_sequences=1, num_beams=2, no_repeat_ngram_size=3, max_length=max_length, ) self.assertEqual(generated_ids.shape, (input_ids.shape[0], max_length)) def test_shift_tokens_right(self): input_ids = torch.tensor([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=torch.long) shifted = shift_tokens_right(input_ids, 1, 2) n_pad_before = input_ids.eq(1).float().sum() n_pad_after = shifted.eq(1).float().sum() self.assertEqual(shifted.shape, input_ids.shape) self.assertEqual(n_pad_after, n_pad_before - 1) self.assertTrue(torch.eq(shifted[:, 0], 2).all()) @slow def test_tokenization(self): tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp") examples = [" Hello world", " DomDramg"] # need leading spaces for equality fairseq_results = [ torch.tensor([0, 20920, 232, 2]), torch.tensor([0, 11349, 495, 4040, 571, 2]), ] for ex, desired_result in zip(examples, fairseq_results): mvp_toks = tokenizer.encode(ex, return_tensors="pt").squeeze() assert_tensors_close(desired_result.long(), mvp_toks, prefix=ex) @require_torch_fp16 def test_generate_fp16(self): config, input_ids, batch_size = self._get_config_and_data() attention_mask = input_ids.ne(1).to(torch_device) model = MvpForConditionalGeneration(config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_dummy_inputs(self): config, *_ = self._get_config_and_data() model = MvpForConditionalGeneration(config).eval().to(torch_device) model(**model.dummy_inputs) def test_resize_tokens_embeddings_more(self): config, input_ids, _ = self._get_config_and_data() def _get_embs(m): return (m.get_input_embeddings().weight.data.clone(), m.get_output_embeddings().weight.data.clone()) model = MvpForConditionalGeneration(config).eval().to(torch_device) input, output = _get_embs(model) self.assertTrue(torch.eq(input, output).all()) new_vocab_size = 45 model.resize_token_embeddings(new_vocab_size) input_new, output_new = _get_embs(model) self.assertEqual(input_new.shape, (new_vocab_size, config.d_model)) self.assertEqual(output_new.shape, (new_vocab_size, config.d_model)) self.assertTrue(torch.eq(input_new, output_new).all()) @require_torch class MvpModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (MvpModel, MvpForConditionalGeneration, MvpForSequenceClassification, MvpForQuestionAnswering) if is_torch_available() else () ) all_generative_model_classes = (MvpForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": MvpModel, "fill-mask": MvpForConditionalGeneration, "question-answering": MvpForQuestionAnswering, "summarization": MvpForConditionalGeneration, "text-classification": MvpForSequenceClassification, "text-generation": MvpForCausalLM, "text2text-generation": 
MvpForConditionalGeneration, "translation": MvpForConditionalGeneration, "zero-shot": MvpForSequenceClassification, } if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = False test_pruning = False test_missing_keys = False # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def setUp(self): self.model_tester = MvpModelTester(self) self.config_tester = ConfigTester(self, config_class=MvpConfig) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) # MvpForSequenceClassification does not support inputs_embeds def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MvpModel, MvpForConditionalGeneration, MvpForQuestionAnswering): model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = MvpForConditionalGeneration(config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def assert_tensors_close(a, b, atol=1e-12, prefix=""): """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item() if a.numel() > 100: msg = f"tensor values are 
{pct_different:.1%} percent different." else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg) def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) @require_torch @require_sentencepiece @require_tokenizers class MvpModelIntegrationTests(unittest.TestCase): @cached_property def default_tokenizer(self): return MvpTokenizer.from_pretrained("RUCAIBox/mvp") @slow def test_inference_no_head(self): model = MvpModel.from_pretrained("RUCAIBox/mvp").to(torch_device) input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) attention_mask = input_ids.ne(model.config.pad_token_id) with torch.no_grad(): output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state expected_shape = torch.Size((1, 11, 1024)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[0.3461, 0.3624, 0.2689], [0.3461, 0.3624, 0.2689], [-0.1562, 1.1637, -0.3784]], device=torch_device ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-3)) @slow def test_summarization_inference(self): model = MvpForConditionalGeneration.from_pretrained("RUCAIBox/mvp").to(torch_device) tok = self.default_tokenizer PGE_ARTICLE = """ Listen to local radio broadcasts for advertisements that reference casinos in your area.\nIf none are in your area, listen to national radio broadcasts for advertisements of casinos in other areas.\nNote the location that is mentioned in each advertisement that involves a casino.\nIf no locations are mentioned, note any additional contact information, such as a website or phone number. Use that information to find out where the casinos are.;\n,\n\nIf you learn about more than 1 casino on the radio, use the Internet to search the distance between your location and each casino. Sites such as maps.google.com or mapquest.com will help you in this search.'""" # fmt: skip EXPECTED_SUMMARY = "Listen to the radio.\nUse the Internet." 
dct = tok.batch_encode_plus( [PGE_ARTICLE], return_tensors="pt", ).to(torch_device) hypotheses_batch = model.generate(**dct) decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True) self.assertEqual(EXPECTED_SUMMARY, decoded[0]) class MvpStandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=2, encoder_attention_heads=4, decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = MvpConfig( vocab_size=self.vocab_size, d_model=self.d_model, encoder_layers=self.decoder_layers, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_attention_heads=self.encoder_attention_heads, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, lm_labels, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, attention_mask, lm_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.decoder_seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, lm_labels, ) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = MvpDecoder(config=config).to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) 
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, attention_mask, lm_labels, ): model = MvpDecoder(config=config).to(torch_device).eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=attn_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class MvpStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (MvpDecoder, MvpForCausalLM) if is_torch_available() else () all_generative_model_classes = (MvpForCausalLM,) if is_torch_available() else () fx_comptatible = True test_pruning = False is_encoder_decoder = False def setUp( self, ): self.model_tester = MvpStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=MvpConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_attn_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) @unittest.skip(reason="Decoder cannot keep gradients") def test_retain_grad_hidden_states_attentions(self): return
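# Usage sketch (illustrative, hedged): the summarization flow that
# MvpModelIntegrationTests.test_summarization_inference checks above. Only the
# model/tokenizer names come from the tests; the input text is an assumption
# made purely for illustration.
def _summarization_usage_sketch():
    """Hedged sketch of MVP conditional generation for summarization."""
    from transformers import MvpForConditionalGeneration, MvpTokenizer

    tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp")
    model = MvpForConditionalGeneration.from_pretrained("RUCAIBox/mvp").eval()

    article = "A short paragraph of text to be condensed into a one-line summary."
    inputs = tokenizer(article, return_tensors="pt")

    # Generation follows the model's default GenerationConfig, exactly as
    # model.generate(**dct) does in the integration test above.
    summary_ids = model.generate(**inputs)
    return tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0]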
transformers/tests/models/mvp/test_modeling_mvp.py/0
{ "file_path": "transformers/tests/models/mvp/test_modeling_mvp.py", "repo_id": "transformers", "token_count": 15188 }
459
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import tempfile import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle, prepare_metadata from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image class OneFormerImageProcessorTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, size=None, do_resize=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], num_labels=10, do_reduce_labels=False, ignore_index=255, repo_path="shi-labs/oneformer_demo", class_info_file="ade20k_panoptic.json", num_text=10, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.class_info_file = class_info_file self.num_text = num_text self.repo_path = repo_path # for the post_process_functions self.batch_size = 2 self.num_queries = 10 self.num_classes = 10 self.height = 3 self.width = 4 self.num_labels = num_labels self.do_reduce_labels = do_reduce_labels self.ignore_index = ignore_index def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "num_text": self.num_text, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the expected height and width when providing images to OneFormerImageProcessor, assuming do_resize is set to True with a scalar size. 
""" if not batched: image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size elif isinstance(image, np.ndarray): h, w = image.shape[0], image.shape[1] else: h, w = image.shape[1], image.shape[2] if w < h: expected_height = int(self.size["shortest_edge"] * h / w) expected_width = self.size["shortest_edge"] elif w > h: expected_height = self.size["shortest_edge"] expected_width = int(self.size["shortest_edge"] * w / h) else: expected_height = self.size["shortest_edge"] expected_width = self.size["shortest_edge"] else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def get_fake_oneformer_outputs(self): return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)), masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)), ) def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) return self.num_channels, height, width def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class OneFormerImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string image_processing_class = image_processing_class def setUp(self): super().setUp() self.image_processor_tester = OneFormerImageProcessorTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_proc_properties(self): image_processor = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processor, "image_mean")) self.assertTrue(hasattr(image_processor, "image_std")) self.assertTrue(hasattr(image_processor, "do_normalize")) self.assertTrue(hasattr(image_processor, "do_resize")) self.assertTrue(hasattr(image_processor, "size")) self.assertTrue(hasattr(image_processor, "ignore_index")) self.assertTrue(hasattr(image_processor, "class_info_file")) self.assertTrue(hasattr(image_processor, "num_text")) self.assertTrue(hasattr(image_processor, "repo_path")) self.assertTrue(hasattr(image_processor, "metadata")) self.assertTrue(hasattr(image_processor, "do_reduce_labels")) def comm_get_image_processor_inputs( self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np" ): image_processor = self.image_processing_class(**self.image_processor_dict) # prepare image and target num_labels = self.image_processor_tester.num_labels annotations = None instance_id_to_semantic_id = None image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) if with_segmentation_maps: high = num_labels if is_instance_map: labels_expanded = list(range(num_labels)) * 2 instance_id_to_semantic_id = dict(enumerate(labels_expanded)) annotations = [ 
np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs ] if segmentation_type == "pil": annotations = [Image.fromarray(annotation) for annotation in annotations] inputs = image_processor( image_inputs, ["semantic"] * len(image_inputs), annotations, return_tensors="pt", instance_id_to_semantic_id=instance_id_to_semantic_id, pad_and_return_pixel_mask=True, ) return inputs @unittest.skip def test_init_without_params(self): pass def test_call_with_segmentation_maps(self): def common(is_instance_map=False, segmentation_type=None): inputs = self.comm_get_image_processor_inputs( with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type ) mask_labels = inputs["mask_labels"] class_labels = inputs["class_labels"] pixel_values = inputs["pixel_values"] text_inputs = inputs["text_inputs"] # check the batch_size for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs): self.assertEqual(mask_label.shape[0], class_label.shape[0]) # this ensures padding has happened self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:]) self.assertEqual(len(text_input), self.image_processor_tester.num_text) common() common(is_instance_map=True) common(is_instance_map=False, segmentation_type="pil") common(is_instance_map=True, segmentation_type="pil") def test_binary_mask_to_rle(self): fake_binary_mask = np.zeros((20, 50)) fake_binary_mask[0, 20:] = 1 fake_binary_mask[1, :15] = 1 fake_binary_mask[5, :10] = 1 rle = binary_mask_to_rle(fake_binary_mask) self.assertEqual(len(rle), 4) self.assertEqual(rle[0], 21) self.assertEqual(rle[1], 45) def test_post_process_semantic_segmentation(self): image_processor = self.image_processing_class( num_labels=self.image_processor_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processor_tester.num_text, repo_path="shi-labs/oneformer_demo", ) outputs = self.image_processor_tester.get_fake_oneformer_outputs() segmentation = image_processor.post_process_semantic_segmentation(outputs) self.assertEqual(len(segmentation), self.image_processor_tester.batch_size) self.assertEqual( segmentation[0].shape, ( self.image_processor_tester.height, self.image_processor_tester.width, ), ) target_sizes = [(1, 4) for i in range(self.image_processor_tester.batch_size)] segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes) self.assertEqual(segmentation[0].shape, target_sizes[0]) def test_post_process_instance_segmentation(self): image_processor = self.image_processing_class( num_labels=self.image_processor_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processor_tester.num_text, repo_path="shi-labs/oneformer_demo", ) outputs = self.image_processor_tester.get_fake_oneformer_outputs() segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0) self.assertTrue(len(segmentation) == self.image_processor_tester.batch_size) for el in segmentation: self.assertTrue("segmentation" in el) self.assertTrue("segments_info" in el) self.assertEqual(type(el["segments_info"]), list) self.assertEqual( el["segmentation"].shape, (self.image_processor_tester.height, self.image_processor_tester.width) ) segmentation_with_opts = image_processor.post_process_instance_segmentation( outputs, threshold=0, target_sizes=[(1, 4) for _ in range(self.image_processor_tester.batch_size)],
task_type="panoptic", ) self.assertTrue(len(segmentation_with_opts) == self.image_processor_tester.batch_size) for el in segmentation_with_opts: self.assertTrue("segmentation" in el) self.assertTrue("segments_info" in el) self.assertEqual(type(el["segments_info"]), list) self.assertEqual(el["segmentation"].shape, (1, 4)) def test_post_process_panoptic_segmentation(self): image_processor = self.image_processing_class( num_labels=self.image_processor_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processor_tester.num_text, repo_path="shi-labs/oneformer_demo", ) outputs = self.image_processor_tester.get_fake_oneformer_outputs() segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0) self.assertTrue(len(segmentation) == self.image_processor_tester.batch_size) for el in segmentation: self.assertTrue("segmentation" in el) self.assertTrue("segments_info" in el) self.assertEqual(type(el["segments_info"]), list) self.assertEqual( el["segmentation"].shape, (self.image_processor_tester.height, self.image_processor_tester.width) ) def test_can_load_with_local_metadata(self): # Create a temporary json file class_info = { "0": {"isthing": 0, "name": "foo"}, "1": {"isthing": 0, "name": "bar"}, "2": {"isthing": 1, "name": "baz"}, } metadata = prepare_metadata(class_info) with tempfile.TemporaryDirectory() as tmpdirname: metadata_path = os.path.join(tmpdirname, "metadata.json") with open(metadata_path, "w") as f: json.dump(class_info, f) config_dict = self.image_processor_dict config_dict["class_info_file"] = metadata_path config_dict["repo_path"] = tmpdirname image_processor = self.image_processing_class(**config_dict) self.assertEqual(image_processor.metadata, metadata) def test_removed_deprecated_kwargs(self): image_processor_dict = dict(self.image_processor_dict) image_processor_dict.pop("do_reduce_labels", None) image_processor_dict["reduce_labels"] = True # test we are able to create the image processor with the deprecated kwargs image_processor = self.image_processing_class(**image_processor_dict) self.assertEqual(image_processor.do_reduce_labels, True) # test we still support reduce_labels with config image_processor = self.image_processing_class.from_dict(image_processor_dict) self.assertEqual(image_processor.do_reduce_labels, True)
transformers/tests/models/oneformer/test_image_processing_oneformer.py/0
{ "file_path": "transformers/tests/models/oneformer/test_image_processing_oneformer.py", "repo_id": "transformers", "token_count": 6757 }
460
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch OwlViT model.""" import inspect import os import tempfile import unittest import numpy as np import requests from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig from transformers.testing_utils import ( require_torch, require_torch_accelerator, require_torch_fp16, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import OwlViTForObjectDetection, OwlViTModel, OwlViTTextModel, OwlViTVisionModel if is_vision_available(): from PIL import Image from transformers import OwlViTProcessor class OwlViTVisionModelTester: def __init__( self, parent, batch_size=12, image_size=32, patch_size=2, num_channels=3, is_training=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return OwlViTVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = OwlViTVisionModel(config=config).to(torch_device) model.eval() pixel_values = pixel_values.to(torch.float32) with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) num_patches = (self.image_size // self.patch_size) ** 2 self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) 
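# Worked example of the arithmetic above, using the defaults of this tester
# (added for illustration): image_size=32 and patch_size=2 give
# (32 // 2) ** 2 = 256 patches, so the expected sequence length is
# 256 + 1 = 257 once the [CLS] token is prepended.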
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class OwlViTVisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as OWLVIT does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (OwlViTVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = OwlViTVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=OwlViTVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="OWLVIT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="OWL-ViT does not support training yet") def test_training(self): pass @unittest.skip(reason="OWL-ViT does not support training yet") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="OwlViTVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="OwlViTVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): model_name = "google/owlvit-base-patch32" model = OwlViTVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class OwlViTTextModelTester: def __init__( self, parent, batch_size=12, num_queries=4, seq_length=16, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=12, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=16, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.num_queries = num_queries 
self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size * self.num_queries, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size * self.num_queries, self.seq_length]) if input_mask is not None: num_text, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(num_text,)) for idx, start_index in enumerate(rnd_start_indices): input_mask[idx, :start_index] = 1 input_mask[idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return OwlViTTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = OwlViTTextModel(config=config).to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids=input_ids, attention_mask=input_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size * self.num_queries, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size * self.num_queries, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class OwlViTTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (OwlViTTextModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False def setUp(self): self.model_tester = OwlViTTextModelTester(self) self.config_tester = ConfigTester(self, config_class=OwlViTTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="OWL-ViT does not support training yet") def test_training(self): pass @unittest.skip(reason="OWL-ViT does not support training yet") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="OWLVIT does not use inputs_embeds") def test_inputs_embeds(self): pass 
@unittest.skip(reason="OwlViTTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="OwlViTTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): model_name = "google/owlvit-base-patch32" model = OwlViTTextModel.from_pretrained(model_name) self.assertIsNotNone(model) class OwlViTModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = OwlViTTextModelTester(parent, **text_kwargs) self.vision_model_tester = OwlViTVisionModelTester(parent, **vision_kwargs) self.is_training = is_training self.text_config = self.text_model_tester.get_config().to_dict() self.vision_config = self.vision_model_tester.get_config().to_dict() self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return OwlViTConfig.from_text_vision_configs(self.text_config, self.vision_config, projection_dim=64) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = OwlViTModel(config).to(torch_device).eval() with torch.no_grad(): result = model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, ) image_logits_size = ( self.vision_model_tester.batch_size, self.text_model_tester.batch_size * self.text_model_tester.num_queries, ) text_logits_size = ( self.text_model_tester.batch_size * self.text_model_tester.num_queries, self.vision_model_tester.batch_size, ) self.parent.assertEqual(result.logits_per_image.shape, image_logits_size) self.parent.assertEqual(result.logits_per_text.shape, text_logits_size) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, "return_loss": False, } return config, inputs_dict @require_torch class OwlViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (OwlViTModel,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": OwlViTModel, "zero-shot-object-detection": OwlViTForObjectDetection, } if is_torch_available() else {} ) fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def setUp(self): self.model_tester = OwlViTModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="OwlViTModel does not have 
input/output embeddings") def test_model_get_set_embeddings(self): pass # override as the `logit_scale` parameter initilization is different for OWLVIT def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initilized as per the original implementation if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to False") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init).to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # OWLVIT needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") loaded_model = loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save OwlViTConfig and check if we can load OwlViTVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = OwlViTVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save OwlViTConfig and check if we can load OwlViTTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = OwlViTTextConfig.from_pretrained(tmp_dir_name) 
self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): model_name = "google/owlvit-base-patch32" model = OwlViTModel.from_pretrained(model_name) self.assertIsNotNone(model) class OwlViTForObjectDetectionTester: def __init__(self, parent, is_training=True): self.parent = parent self.text_model_tester = OwlViTTextModelTester(parent) self.vision_model_tester = OwlViTVisionModelTester(parent) self.is_training = is_training self.text_config = self.text_model_tester.get_config().to_dict() self.vision_config = self.vision_model_tester.get_config().to_dict() self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, pixel_values, input_ids, attention_mask def get_config(self): return OwlViTConfig.from_text_vision_configs(self.text_config, self.vision_config, projection_dim=64) def create_and_check_model(self, config, pixel_values, input_ids, attention_mask): model = OwlViTForObjectDetection(config).to(torch_device).eval() with torch.no_grad(): result = model( pixel_values=pixel_values, input_ids=input_ids, attention_mask=attention_mask, return_dict=True, ) pred_boxes_size = ( self.vision_model_tester.batch_size, (self.vision_model_tester.image_size // self.vision_model_tester.patch_size) ** 2, 4, ) pred_logits_size = ( self.vision_model_tester.batch_size, (self.vision_model_tester.image_size // self.vision_model_tester.patch_size) ** 2, 4, ) pred_class_embeds_size = ( self.vision_model_tester.batch_size, (self.vision_model_tester.image_size // self.vision_model_tester.patch_size) ** 2, self.text_model_tester.hidden_size, ) self.parent.assertEqual(result.pred_boxes.shape, pred_boxes_size) self.parent.assertEqual(result.logits.shape, pred_logits_size) self.parent.assertEqual(result.class_embeds.shape, pred_class_embeds_size) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, input_ids, attention_mask = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class OwlViTForObjectDetectionTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (OwlViTForObjectDetection,) if is_torch_available() else () fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def setUp(self): self.model_tester = OwlViTForObjectDetectionTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="OwlViTModel does not have input/output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="Test_initialization is tested in individual model tests") def test_initialization(self): pass 
@unittest.skip(reason="Test_forward_signature is tested in individual model tests") def test_forward_signature(self): pass @unittest.skip(reason="Test_save_load_fast_init_from_base is tested in individual model tests") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="OWL-ViT does not support training yet") def test_training(self): pass @unittest.skip(reason="OWL-ViT does not support training yet") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to False") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init).to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # OWLVIT needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") loaded_model = loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) @slow def test_model_from_pretrained(self): model_name = "google/owlvit-base-patch32" model = OwlViTForObjectDetection.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_torch class OwlViTModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "google/owlvit-base-patch32" model = OwlViTModel.from_pretrained(model_name).to(torch_device) processor = 
OwlViTProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=[["a photo of a cat", "a photo of a dog"]], images=image, max_length=16, padding="max_length", return_tensors="pt", ).to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits self.assertEqual( outputs.logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = torch.tensor([[3.4613, 0.9403]], device=torch_device) self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)) @slow def test_inference_object_detection(self): model_name = "google/owlvit-base-patch32" model = OwlViTForObjectDetection.from_pretrained(model_name).to(torch_device) processor = OwlViTProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=[["a photo of a cat", "a photo of a dog"]], images=image, max_length=16, padding="max_length", return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2) self.assertEqual(outputs.pred_boxes.shape, torch.Size((1, num_queries, 4))) expected_slice_boxes = torch.tensor( [[0.0691, 0.0445, 0.1373], [0.1592, 0.0456, 0.3192], [0.1632, 0.0423, 0.2478]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) @slow def test_inference_one_shot_object_detection(self): model_name = "google/owlvit-base-patch32" model = OwlViTForObjectDetection.from_pretrained(model_name).to(torch_device) processor = OwlViTProcessor.from_pretrained(model_name) image = prepare_img() query_image = prepare_img() inputs = processor( images=image, query_images=query_image, max_length=16, padding="max_length", return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model.image_guided_detection(**inputs) num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2) self.assertEqual(outputs.target_pred_boxes.shape, torch.Size((1, num_queries, 4))) expected_slice_boxes = torch.tensor( [[0.0691, 0.0445, 0.1373], [0.1592, 0.0456, 0.3192], [0.1632, 0.0423, 0.2478]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.target_pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) @slow @require_torch_accelerator @require_torch_fp16 def test_inference_one_shot_object_detection_fp16(self): model_name = "google/owlvit-base-patch32" model = OwlViTForObjectDetection.from_pretrained(model_name, torch_dtype=torch.float16).to(torch_device) processor = OwlViTProcessor.from_pretrained(model_name) image = prepare_img() query_image = prepare_img() inputs = processor( images=image, query_images=query_image, max_length=16, padding="max_length", return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model.image_guided_detection(**inputs) # No need to check the logits, we just check inference runs fine. num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2) self.assertEqual(outputs.target_pred_boxes.shape, torch.Size((1, num_queries, 4)))
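# --- Hedged illustrative sketch (not part of the original test suite) --------
# The integration tests above only assert output shapes and a few logits. For
# readers wondering how raw `OwlViTForObjectDetection` outputs are normally
# consumed, the helper below sketches the usual post-processing step, following
# the public OWL-ViT documentation. Treat the exact keyword names
# (`target_sizes`, `threshold`) and result keys ("scores", "labels", "boxes")
# as assumptions of this sketch rather than something this test file verifies.
def _sketch_zero_shot_detection_postprocessing():
    model_name = "google/owlvit-base-patch32"
    model = OwlViTForObjectDetection.from_pretrained(model_name)
    processor = OwlViTProcessor.from_pretrained(model_name)
    image = prepare_img()
    texts = [["a photo of a cat", "a photo of a dog"]]
    inputs = processor(text=texts, images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # Rescale the normalized box predictions back to the original image size.
    target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
    results = processor.post_process_object_detection(
        outputs=outputs, threshold=0.1, target_sizes=target_sizes
    )
    for score, label, box in zip(results[0]["scores"], results[0]["labels"], results[0]["boxes"]):
        print(f"{texts[0][int(label)]}: score={score.item():.2f}, box={box.tolist()}")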
transformers/tests/models/owlvit/test_modeling_owlvit.py/0
{ "file_path": "transformers/tests/models/owlvit/test_modeling_owlvit.py", "repo_id": "transformers", "token_count": 15302 }
461
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Perceiver model.""" import copy import inspect import math import tempfile import unittest import warnings from typing import Dict, List, Tuple import numpy as np from datasets import load_dataset from transformers import PerceiverConfig from transformers.testing_utils import ( IS_ROCM_SYSTEM, require_torch, require_torch_multi_gpu, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverModel, PerceiverTokenizer, ) from transformers.models.auto.modeling_auto import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) if is_vision_available(): from PIL import Image from transformers import PerceiverImageProcessor class PerceiverModelTester: def __init__( self, parent, batch_size=13, seq_length=7, num_channels=3, image_size=32, train_size=[20, 20], num_frames=5, audio_samples_per_frame=200, samples_per_patch=20, nchunks=20, num_latents=10, d_latents=20, d_model=64, num_blocks=1, num_self_attends_per_block=2, num_self_attention_heads=1, num_cross_attention_heads=1, self_attention_widening_factor=4, cross_attention_widening_factor=4, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_act="gelu", attention_probs_dropout_prob=0.1, initializer_range=0.02, max_position_embeddings=7, num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.num_channels = num_channels self.image_size = image_size self.train_size = train_size self.num_frames = num_frames self.audio_samples_per_frame = audio_samples_per_frame self.samples_per_patch = samples_per_patch self.nchunks = nchunks self.num_latents = num_latents self.d_latents = d_latents self.d_model = d_model self.num_blocks = num_blocks self.num_self_attends_per_block = num_self_attends_per_block self.num_self_attention_heads = num_self_attention_heads self.num_cross_attention_heads = num_cross_attention_heads self.self_attention_widening_factor = self_attention_widening_factor self.cross_attention_widening_factor = cross_attention_widening_factor self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_act = 
hidden_act self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope # set subsampling for multimodal model (take first chunk) image_chunk_size = np.prod((self.num_frames, self.image_size, self.image_size)) // self.nchunks audio_chunk_size = self.num_frames * self.audio_samples_per_frame // self.samples_per_patch // self.nchunks self.subsampling = { "image": torch.arange(0, image_chunk_size), "audio": torch.arange(0, audio_chunk_size), "label": None, } def prepare_config_and_inputs(self, model_class=None): config = self.get_config() input_mask = None sequence_labels = None token_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.num_labels) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) if model_class is None or model_class.__name__ == "PerceiverModel": inputs = floats_tensor([self.batch_size, self.seq_length, config.d_model], scale=1.0) return config, inputs, input_mask, sequence_labels, token_labels elif model_class.__name__ in ["PerceiverForMaskedLM", "PerceiverForSequenceClassification"]: inputs = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) # input mask is only relevant for text inputs if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) elif model_class.__name__ == "PerceiverForImageClassificationLearned": inputs = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) elif model_class.__name__ == "PerceiverForImageClassificationFourier": inputs = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) elif model_class.__name__ == "PerceiverForImageClassificationConvProcessing": inputs = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) elif model_class.__name__ == "PerceiverForOpticalFlow": inputs = floats_tensor([self.batch_size, 2, 27, self.train_size[0], self.train_size[1]]) elif model_class.__name__ == "PerceiverForMultimodalAutoencoding": images = torch.randn( (self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size), device=torch_device, ) audio = torch.randn( (self.batch_size, self.num_frames * self.audio_samples_per_frame, 1), device=torch_device ) inputs = { "image": images, "audio": audio, "label": torch.zeros((self.batch_size, self.num_labels), device=torch_device), } else: raise ValueError(f"Model class {model_class} not supported") return config, inputs, input_mask, sequence_labels, token_labels def get_config(self): return PerceiverConfig( num_latents=self.num_latents, d_latents=self.d_latents, d_model=self.d_model, qk_channels=self.d_latents, v_channels=self.d_latents, num_blocks=self.num_blocks, num_self_attends_per_block=self.num_self_attends_per_block, num_self_attention_heads=self.num_self_attention_heads, num_cross_attention_heads=self.num_cross_attention_heads, self_attention_widening_factor=self.self_attention_widening_factor, cross_attention_widening_factor=self.cross_attention_widening_factor, vocab_size=self.vocab_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, max_position_embeddings=self.max_position_embeddings, image_size=self.image_size, train_size=self.train_size, num_frames=self.num_frames, audio_samples_per_frame=self.audio_samples_per_frame, 
samples_per_patch=self.samples_per_patch, num_labels=self.num_labels, output_num_channels=32, _label_trainable_num_channels=16, ) def get_pipeline_config(self): config = self.get_config() # Byte level vocab config.vocab_size = 261 config.max_position_embeddings = 40 return config def create_and_check_for_masked_lm(self, config, inputs, input_mask, sequence_labels, token_labels): model = PerceiverForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(inputs, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification(self, config, inputs, input_mask, sequence_labels, token_labels): model = PerceiverForSequenceClassification(config=config) model.to(torch_device) model.eval() result = model(inputs, attention_mask=input_mask, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_image_classification_learned( self, config, inputs, input_mask, sequence_labels, token_labels ): model = PerceiverForImageClassificationLearned(config=config) model.to(torch_device) model.eval() result = model(inputs, attention_mask=input_mask, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_image_classification_fourier( self, config, inputs, input_mask, sequence_labels, token_labels ): model = PerceiverForImageClassificationFourier(config=config) model.to(torch_device) model.eval() result = model(inputs, attention_mask=input_mask, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_image_classification_conv( self, config, inputs, input_mask, sequence_labels, token_labels ): model = PerceiverForImageClassificationConvProcessing(config=config) model.to(torch_device) model.eval() result = model(inputs, attention_mask=input_mask, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, inputs, input_mask, sequence_labels, token_labels = config_and_inputs inputs_dict = {"inputs": inputs, "attention_mask": input_mask} return config, inputs_dict def prepare_config_and_inputs_for_model_class(self, model_class): config_and_inputs = self.prepare_config_and_inputs(model_class) config, inputs, input_mask, sequence_labels, token_labels = config_and_inputs inputs_dict = {"inputs": inputs, "attention_mask": input_mask} return config, inputs_dict @require_torch class PerceiverModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( PerceiverModel, PerceiverForMaskedLM, PerceiverForImageClassificationLearned, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForOpticalFlow, PerceiverForMultimodalAutoencoding, PerceiverForSequenceClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": PerceiverModel, "fill-mask": PerceiverForMaskedLM, "image-classification": ( PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, ), "text-classification": PerceiverForSequenceClassification, "zero-shot": PerceiverForSequenceClassification, } if is_torch_available() else {} ) test_pruning = False test_head_masking = 
False test_torchscript = False maxDiff = None def setUp(self): self.model_tester = PerceiverModelTester(self) self.config_tester = ConfigTester( self, config_class=PerceiverConfig, hidden_size=37, common_properties=["d_model", "num_self_attention_heads", "num_cross_attention_heads"], ) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if model_class.__name__ == "PerceiverForMultimodalAutoencoding": inputs_dict["subsampled_output_points"] = self.model_tester.subsampling if return_labels: if model_class.__name__ in [ *MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values(), "PerceiverForImageClassificationLearned", "PerceiverForImageClassificationFourier", "PerceiverForImageClassificationConvProcessing", *MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES.values(), ]: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class.__name__ in [ *MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES.values(), *MODEL_FOR_MASKED_LM_MAPPING_NAMES.values(), ]: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) return inputs_dict def test_config(self): self.config_tester.run_common_tests() def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(model_class=PerceiverForMaskedLM) self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(model_class=PerceiverForSequenceClassification) self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_image_classification_learned(self): config_and_inputs = self.model_tester.prepare_config_and_inputs( model_class=PerceiverForImageClassificationLearned ) self.model_tester.create_and_check_for_image_classification_learned(*config_and_inputs) def test_for_image_classification_fourier(self): config_and_inputs = self.model_tester.prepare_config_and_inputs( model_class=PerceiverForImageClassificationFourier ) self.model_tester.create_and_check_for_image_classification_fourier(*config_and_inputs) def test_for_image_classification_conv(self): config_and_inputs = self.model_tester.prepare_config_and_inputs( model_class=PerceiverForImageClassificationConvProcessing ) self.model_tester.create_and_check_for_image_classification_conv(*config_and_inputs) def test_model_get_set_embeddings(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_model_class(model_class) model = model_class(config) # we overwrite this, as the embeddings of Perceiver are an instance of nn.Parameter # and Perceiver doesn't support get_output_embeddings self.assertIsInstance(model.get_input_embeddings(), (nn.Parameter)) def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") for model_class in self.all_model_classes: if model_class.__name__ in [ *MODEL_MAPPING_NAMES.values(), "PerceiverForOpticalFlow", "PerceiverForMultimodalAutoencoding", ]: continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_model_class(model_class) config.return_dict = True model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def 
test_forward_signature(self): for model_class in self.all_model_classes: config, _ = self.model_tester.prepare_config_and_inputs_for_model_class(model_class) model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["inputs"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_determinism(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_model_class(model_class) model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): inputs_dict = self._prepare_for_class(inputs_dict, model_class) first = model(**inputs_dict)[0] second = model(**inputs_dict)[0] if model_class.__name__ == "PerceiverForMultimodalAutoencoding": # model outputs a dictionary with logits per modality, let's verify each modality for modality in first.keys(): out_1 = first[modality].cpu().numpy() out_2 = second[modality].cpu().numpy() out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) else: out_1 = first.cpu().numpy() out_2 = second.cpu().numpy() out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def test_attention_outputs(self): seq_len = getattr(self.model_tester, "num_latents", None) for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_model_class(model_class) config.return_dict = True inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self_attentions = outputs.attentions cross_attentions = outputs.cross_attentions # check expected number of attentions depending on model class expected_num_self_attentions = self.model_tester.num_blocks * self.model_tester.num_self_attends_per_block if model.__class__.__name__ == "PerceiverModel": # we expect to have 2 cross-attentions, namely one in the PerceiverEncoder, and one in PerceiverBasicDecoder expected_num_cross_attentions = 1 else: # we expect to have 2 cross-attentions, namely one in the PerceiverEncoder, and one in PerceiverBasicDecoder expected_num_cross_attentions = 2 self.assertEqual(len(self_attentions), expected_num_self_attentions) self.assertEqual(len(cross_attentions), expected_num_cross_attentions) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self_attentions = outputs.attentions cross_attentions = outputs.cross_attentions self.assertEqual(len(self_attentions), expected_num_self_attentions) self.assertEqual(len(cross_attentions), expected_num_cross_attentions) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_self_attention_heads, seq_len, seq_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs 
= model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_self_attentions) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_self_attention_heads, seq_len, seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_blocks * self.model_tester.num_self_attends_per_block + 1 self.assertEqual(len(hidden_states), expected_num_layers) seq_length = self.model_tester.num_latents self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.d_latents], ) for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_model_class(model_class) inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_model_outputs_equivalence(self): def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." 
), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_model_class(model_class) model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) if model_class.__name__ not in ["PerceiverForOpticalFlow", "PerceiverForMultimodalAutoencoding"]: # optical flow + multimodal models don't support training for now tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) if model_class.__name__ not in ["PerceiverForOpticalFlow", "PerceiverForMultimodalAutoencoding"]: # optical flow + multimodal models don't support training for now tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) if model_class.__name__ not in ["PerceiverForOpticalFlow", "PerceiverForMultimodalAutoencoding"]: # optical flow + multimodal models don't support training for now tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) if model_class.__name__ not in ["PerceiverForOpticalFlow", "PerceiverForMultimodalAutoencoding"]: # optical flow + multimodal models don't support training for now tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence( model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True} ) def test_retain_grad_hidden_states_attentions(self): # no need to test all models as different heads yield the same functionality model_class = PerceiverForMaskedLM config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_model_class(model_class) config.output_hidden_states = True config.output_attentions = True model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] # Encoder-only model hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_feed_forward_chunking(self): for model_class in self.all_model_classes: original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_model_class(model_class) torch.manual_seed(0) config = copy.deepcopy(original_config) model = model_class(config) 
model.to(torch_device) model.eval() hidden_states_no_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0] torch.manual_seed(0) config.chunk_size_feed_forward = 1 model = model_class(config) model.to(torch_device) model.eval() hidden_states_with_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0] if model_class.__name__ == "PerceiverForMultimodalAutoencoding": # model outputs a dictionary with logits for each modality for modality in hidden_states_no_chunk.keys(): self.assertTrue( torch.allclose(hidden_states_no_chunk[modality], hidden_states_with_chunk[modality], atol=1e-3) ) else: self.assertTrue(torch.allclose(hidden_states_no_chunk, hidden_states_with_chunk, atol=1e-3)) def test_save_load(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_model_class(model_class) model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "PerceiverForMultimodalAutoencoding": for modality in outputs[0].keys(): out_2 = outputs[0][modality].cpu().numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname) model.to(torch_device) with torch.no_grad(): after_outputs = model(**self._prepare_for_class(inputs_dict, model_class)) # Make sure we don't have nans out_1 = after_outputs[0][modality].cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) else: out_2 = outputs[0].cpu().numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname) model.to(torch_device) with torch.no_grad(): after_outputs = model(**self._prepare_for_class(inputs_dict, model_class)) # Make sure we don't have nans out_1 = after_outputs[0].cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def test_correct_missing_keys(self): if not self.test_missing_keys: self.skipTest(reason="test_missing_keys is set to False") config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # most Perceiver models don't have a typical head like is the case with BERT if model_class.__name__ in [ "PerceiverForOpticalFlow", "PerceiverForMultimodalAutoencoding", *MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values(), "PerceiverForImageClassificationLearned", "PerceiverForImageClassificationFourier", "PerceiverForImageClassificationConvProcessing", *MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES.values(), ]: continue model = model_class(config) base_model_prefix = model.base_model_prefix if hasattr(model, base_model_prefix): with tempfile.TemporaryDirectory() as temp_dir_name: model.base_model.save_pretrained(temp_dir_name) model, loading_info = model_class.from_pretrained(temp_dir_name, output_loading_info=True) with self.subTest(msg=f"Missing keys for {model.__class__.__name__}"): self.assertGreater(len(loading_info["missing_keys"]), 0) def test_problem_types(self): problem_types = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: 
if model_class.__name__ not in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values(): continue config, inputs, input_mask, _, _ = self.model_tester.prepare_config_and_inputs(model_class=model_class) inputs_dict = {"inputs": inputs, "attention_mask": input_mask} for problem_type in problem_types: with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"): config.problem_type = problem_type["title"] config.num_labels = problem_type["num_labels"] model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) if problem_type["num_labels"] > 1: inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"]) inputs["labels"] = inputs["labels"].to(problem_type["dtype"]) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=True) as warning_list: loss = model(**inputs).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message): raise ValueError( f"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() @require_torch_multi_gpu @unittest.skip( reason=( "Perceiver does not work with data parallel (DP) because of a bug in PyTorch:" " https://github.com/pytorch/pytorch/issues/36035" ) ) def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip(reason="Perceiver models don't have a typical head like is the case with BERT") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="Perceiver models don't have a typical head like is the case with BERT") def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="Perceiver doesn't support resize_token_embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Perceiver doesn't support resize_token_embeddings") def test_resize_embeddings_untied(self): pass @unittest.skip(reason="Perceiver doesn't support inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Perceiver doesn't support the AutoModel API") def test_load_with_mismatched_shapes(self): pass @slow def test_model_from_pretrained(self): model_name = "deepmind/language-perceiver" model = PerceiverModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image # Helper functions for optical flow integration test def prepare_optical_flow_images(): dataset = load_dataset("hf-internal-testing/fixtures_sintel", split="test", trust_remote_code=True) image1 = Image.open(dataset[0]["file"]).convert("RGB") image2 = Image.open(dataset[0]["file"]).convert("RGB") return image1, image2 def normalize(img): return img / 255.0 * 2 - 1 def extract_image_patches(x, kernel, stride=1, dilation=1): # Do TF 'SAME' Padding b, c, h, w = x.shape h2 = math.ceil(h / stride) w2 = math.ceil(w / stride) pad_row = (h2 - 1) * stride + (kernel - 1) * dilation + 1 - h pad_col = (w2 - 1) * stride + (kernel - 1) * dilation + 1 - w x = torch.nn.functional.pad(x, (pad_row // 2, pad_row - pad_row // 2, pad_col // 2, pad_col - pad_col // 2)) # Extract patches 
patches = x.unfold(2, kernel, stride).unfold(3, kernel, stride) patches = patches.permute(0, 4, 5, 1, 2, 3).contiguous() return patches.view(b, -1, patches.shape[-2], patches.shape[-1]) @require_torch @require_vision class PerceiverModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver") model = PerceiverForMaskedLM.from_pretrained("deepmind/language-perceiver") model.to(torch_device) # prepare inputs text = "This is an incomplete sentence where some words are missing." encoding = tokenizer(text, padding="max_length", return_tensors="pt") # mask " missing.". encoding.input_ids[0, 52:61] = tokenizer.mask_token_id inputs, input_mask = encoding.input_ids.to(torch_device), encoding.attention_mask.to(torch_device) # forward pass with torch.no_grad(): outputs = model(inputs=inputs, attention_mask=input_mask) logits = outputs.logits # verify logits expected_shape = torch.Size((1, tokenizer.model_max_length, len(tokenizer))) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor( [[-10.8609, -10.7651, -10.9187], [-12.1689, -11.9389, -12.1479], [-12.1518, -11.9707, -12.2073]], device=torch_device, ) self.assertTrue(torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)) expected_greedy_predictions = [38, 115, 111, 121, 121, 111, 116, 109, 52] masked_tokens_predictions = logits[0, 52:61].argmax(dim=-1).tolist() self.assertListEqual(expected_greedy_predictions, masked_tokens_predictions) @slow def test_inference_image_classification(self): image_processor = PerceiverImageProcessor() model = PerceiverForImageClassificationLearned.from_pretrained("deepmind/vision-perceiver-learned") model.to(torch_device) # prepare inputs image = prepare_img() inputs = image_processor(image, return_tensors="pt").pixel_values.to(torch_device) input_mask = None # forward pass with torch.no_grad(): outputs = model(inputs=inputs, attention_mask=input_mask) logits = outputs.logits # verify logits expected_shape = torch.Size((1, model.config.num_labels)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor([-1.1652, -0.1992, -0.7520], device=torch_device) atol = 1e-3 if IS_ROCM_SYSTEM else 1e-4 self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=atol)) @slow def test_inference_image_classification_fourier(self): image_processor = PerceiverImageProcessor() model = PerceiverForImageClassificationFourier.from_pretrained("deepmind/vision-perceiver-fourier") model.to(torch_device) # prepare inputs image = prepare_img() inputs = image_processor(image, return_tensors="pt").pixel_values.to(torch_device) input_mask = None # forward pass with torch.no_grad(): outputs = model(inputs=inputs, attention_mask=input_mask) logits = outputs.logits # verify logits expected_shape = torch.Size((1, model.config.num_labels)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor([-1.1295, -0.2832, 0.3226], device=torch_device) self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4)) @slow def test_inference_image_classification_conv(self): image_processor = PerceiverImageProcessor() model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv") model.to(torch_device) # prepare inputs image = prepare_img() inputs = image_processor(image, return_tensors="pt").pixel_values.to(torch_device) input_mask = None # forward pass with torch.no_grad(): outputs = model(inputs=inputs, attention_mask=input_mask) 
logits = outputs.logits # verify logits expected_shape = torch.Size((1, model.config.num_labels)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor([-1.1186, 0.0554, 0.0897], device=torch_device) self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4)) @slow def test_inference_optical_flow(self): model = PerceiverForOpticalFlow.from_pretrained("deepmind/optical-flow-perceiver") model.to(torch_device) # prepare inputs image1, image2 = prepare_optical_flow_images() img1 = normalize(np.array(image1)) img2 = normalize(np.array(image1)) # stack images img1 = torch.tensor(np.moveaxis(img1, -1, 0)) img2 = torch.tensor(np.moveaxis(img2, -1, 0)) images = torch.stack([img1, img2], dim=0) # extract 3x3 patches patch_size = model.config.train_size inputs = images[..., : patch_size[0], : patch_size[1]].unsqueeze(0) batch_size, _, C, H, W = inputs.shape patches = extract_image_patches(inputs.view(batch_size * 2, C, H, W), kernel=3) _, C, H, W = patches.shape patches = patches.view(batch_size, -1, C, H, W).float() # forward pass with torch.no_grad(): outputs = model(inputs=patches.to(torch_device)) logits = outputs.logits # verify logits expected_shape = torch.Size((1, 368, 496, 2)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor( [ [[0.0025, -0.0050], [0.0025, -0.0049], [0.0025, -0.0048]], [[0.0026, -0.0049], [0.0026, -0.0048], [0.0026, -0.0047]], [[0.0026, -0.0049], [0.0026, -0.0048], [0.0026, -0.0046]], ], device=torch_device, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4)) @slow def test_inference_interpolate_pos_encoding(self): image_processor = PerceiverImageProcessor(size={"height": 384, "width": 384}) model = PerceiverForImageClassificationLearned.from_pretrained("deepmind/vision-perceiver-learned") model.to(torch_device) # prepare inputs image = prepare_img() inputs = image_processor(image, return_tensors="pt").pixel_values.to(torch_device) input_mask = None # forward pass with torch.no_grad(): outputs = model(inputs=inputs, attention_mask=input_mask, interpolate_pos_encoding=True) logits = outputs.logits # verify logits expected_shape = torch.Size((1, model.config.num_labels)) self.assertEqual(logits.shape, expected_shape)
transformers/tests/models/perceiver/test_modeling_perceiver.py/0
{ "file_path": "transformers/tests/models/perceiver/test_modeling_perceiver.py", "repo_id": "transformers", "token_count": 21243 }
462
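A minimal standalone sketch of the unfold-based patch extraction used by the Perceiver optical-flow integration test above; the helper body mirrors extract_image_patches from that file, and the dummy tensor sizes are illustrative assumptions rather than values from the test.

import math

import torch


def extract_image_patches(x, kernel, stride=1, dilation=1):
    # TF 'SAME' padding: keep ceil(size / stride) output positions per spatial dim
    b, c, h, w = x.shape
    h2 = math.ceil(h / stride)
    w2 = math.ceil(w / stride)
    pad_row = (h2 - 1) * stride + (kernel - 1) * dilation + 1 - h
    pad_col = (w2 - 1) * stride + (kernel - 1) * dilation + 1 - w
    x = torch.nn.functional.pad(x, (pad_row // 2, pad_row - pad_row // 2, pad_col // 2, pad_col - pad_col // 2))
    # slice out kernel x kernel windows, then fold the window dims into the channel dim
    patches = x.unfold(2, kernel, stride).unfold(3, kernel, stride)
    patches = patches.permute(0, 4, 5, 1, 2, 3).contiguous()
    return patches.view(b, -1, patches.shape[-2], patches.shape[-1])


dummy = torch.randn(2, 3, 8, 8)  # (batch, channels, height, width)
patches = extract_image_patches(dummy, kernel=3)
print(patches.shape)  # torch.Size([2, 27, 8, 8]) -> 3 channels * 3 * 3 window values per spatial position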
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right EN_CODE = 50003 PYTHON_CODE = 50002 @require_sentencepiece @require_tokenizers class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "uclanlp/plbart-base" tokenizer_class = PLBartTokenizer rust_tokenizer_class = None test_rust_tokenizer = False def setUp(self): super().setUp() # We have a SentencePiece fixture for testing tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True) tokenizer.save_pretrained(self.tmpdirname) def test_full_base_tokenizer(self): tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) end = tokenizer.vocab_size language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)] self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"]) code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" input_ids = tokenizer(code).input_ids self.assertEqual( tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False), code, ) def test_full_multi_tokenizer(self): tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True) tokens = tokenizer.tokenize("This is a test") 
self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) end = tokenizer.vocab_size language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)] self.assertListEqual( language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] ) code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" input_ids = tokenizer(code).input_ids self.assertEqual( tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False), code, ) @require_torch @require_sentencepiece @require_tokenizers class PLBartPythonEnIntegrationTest(unittest.TestCase): checkpoint_name = "uclanlp/plbart-python-en_XX" src_text = [ "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])", "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])", ] tgt_text = [ "Returns the maximum value of a b c.", "Sums the values of a b c.", ] expected_src_tokens = [ 134, 5452, 33460, 33441, 33463, 33465, 33463, 33449, 988, 20, 33456, 19, 33456, 771, 39, 4258, 889, 3318, 33441, 33463, 33465, 33463, 33449, 2471, 2, PYTHON_CODE, ] @classmethod def setUpClass(cls): cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained( cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX" ) cls.pad_token_id = 1 return cls def check_language_codes(self): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003) def test_python_en_tokenizer_batch_encode_plus(self): ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens, ids) def test_python_en_tokenizer_decode_ignores_language_codes(self): self.assertIn(PYTHON_CODE, self.tokenizer.all_special_ids) generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2] result = self.tokenizer.decode(generated_ids, skip_special_tokens=True) expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True) self.assertEqual(result, expected_english) self.assertNotIn(self.tokenizer.eos_token, result) def test_python_en_tokenizer_truncation(self): src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20] self.assertIsInstance(src_text[0], str) desired_max_length = 10 ids = self.tokenizer(src_text, 
max_length=desired_max_length, truncation=True).input_ids[0] self.assertEqual(ids[-2], 2) self.assertEqual(ids[-1], PYTHON_CODE) self.assertEqual(len(ids), desired_max_length) def test_mask_token(self): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001]) def test_special_tokens_unaffacted_by_save_load(self): tmpdirname = tempfile.mkdtemp() original_special_tokens = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(tmpdirname) new_tok = PLBartTokenizer.from_pretrained(tmpdirname) self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens) @require_torch def test_batch_fairseq_parity(self): batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt") batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE]) self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE) self.assertEqual(batch.decoder_input_ids[1][-1], 2) self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE]) @require_torch def test_python_en_tokenizer_prepare_batch(self): batch = self.tokenizer( self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt", ) batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id) self.assertIsInstance(batch, BatchEncoding) self.assertEqual((2, 26), batch.input_ids.shape) self.assertEqual((2, 26), batch.attention_mask.shape) result = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens, result) self.assertEqual(2, batch.decoder_input_ids[0, -1]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens, []) self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE]) def test_seq2seq_max_length(self): batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt") targets = self.tokenizer( text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt" ) labels = targets["input_ids"] batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id) self.assertEqual(batch.input_ids.shape[1], 3) self.assertEqual(batch.decoder_input_ids.shape[1], 10) @require_torch def test_tokenizer_translation(self): inputs = self.tokenizer._build_translation_inputs( "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java" ) self.assertEqual( nested_simplify(inputs), { # A, test, EOS, en_XX "input_ids": [[150, 242, 2, 50003]], "attention_mask": [[1, 1, 1, 1]], # java "forced_bos_token_id": 50001, }, )
transformers/tests/models/plbart/test_tokenization_plbart.py/0
{ "file_path": "transformers/tests/models/plbart/test_tokenization_plbart.py", "repo_id": "transformers", "token_count": 6897 }
463
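A small hedged sketch of the label-to-decoder-input rotation that the PLBart fairseq-parity test above asserts: the trailing language code becomes the first decoder token and eos stays last. The toy token ids (150, 242, 17) are made up for illustration, and the expected output in the comment follows from those assertions rather than from running the code.

import torch

from transformers.models.plbart.modeling_plbart import shift_tokens_right

PAD, EOS, EN_CODE = 1, 2, 50003  # ids matching the constants used in the tests above
labels = torch.tensor([[150, 242, 17, EOS, EN_CODE]])  # toy target sequence ending in [eos, lang_code]
decoder_input_ids = shift_tokens_right(labels, PAD)
print(decoder_input_ids.tolist())  # expected: [[50003, 150, 242, 17, 2]] -- the language code rotates to the front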
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch PvtV2 model.""" import inspect import tempfile import unittest from transformers import PvtV2Backbone, PvtV2Config, is_torch_available, is_vision_available from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_accelerator, require_torch_fp16, slow, torch_device, ) from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoImageProcessor, PvtV2ForImageClassification, PvtV2Model if is_vision_available(): from PIL import Image class PvtV2ConfigTester(ConfigTester): def run_common_tests(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_encoder_blocks")) class PvtV2ModelTester(ModelTesterMixin): def __init__( self, parent, batch_size=13, image_size=None, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128], downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], out_indices=[0, 1, 2, 3], is_training=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = 64 if image_size is None else image_size self.num_channels = num_channels self.num_encoder_blocks = num_encoder_blocks self.sr_ratios = sr_ratios self.depths = depths self.hidden_sizes = hidden_sizes self.downsampling_rates = downsampling_rates self.num_attention_heads = num_attention_heads self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.out_indices = out_indices self.num_labels = num_labels self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return PvtV2Config( image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, sr_ratios=self.sr_ratios, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, out_indices=self.out_indices, ) def create_and_check_model(self, config, pixel_values, labels): model = PvtV2Model(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertIsNotNone(result.last_hidden_state) def create_and_check_backbone(self, config, pixel_values, labels): model = PvtV2Backbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4]) # verify channels self.parent.assertEqual(len(model.channels), len(config.out_features)) self.parent.assertListEqual(model.channels, config.hidden_sizes[1:]) # verify backbone works with out_features=None config.out_features = None model = PvtV2Backbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1]) # verify channels self.parent.assertEqual(len(model.channels), 1) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]]) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = PvtV2ForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) # test greyscale images config.num_channels = 1 model = PvtV2ForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch class PvtV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (PvtV2Model, PvtV2ForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": PvtV2Model, "image-classification": PvtV2ForImageClassification} if is_torch_available() else {} ) test_head_masking = False test_pruning = False test_resize_embeddings = False test_torchscript = False has_attentions = False def setUp(self): self.model_tester = PvtV2ModelTester(self) self.config_tester = PvtV2ConfigTester(self, config_class=PvtV2Config) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Pvt-V2 does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Pvt-V2 does not have get_input_embeddings method and get_output_embeddings methods") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="This architecture does not work with using 
reentrant.") def test_training_gradient_checkpointing(self): # Scenario - 1 default behaviour self.check_training_gradient_checkpointing() @unittest.skip(reason="This architecture does not work with using reentrant.") def test_training_gradient_checkpointing_use_reentrant(self): # Scenario - 2 with `use_reentrant=True` - this is the default value that is used in pytorch's # torch.utils.checkpoint.checkpoint self.check_training_gradient_checkpointing(gradient_checkpointing_kwargs={"use_reentrant": True}) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config=config) for name, param in model.named_parameters(): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = len(self.model_tester.depths) self.assertEqual(len(hidden_states), expected_num_layers) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]), [ self.model_tester.hidden_sizes[self.model_tester.out_indices[0]], self.model_tester.image_size // 2 ** (2 + self.model_tester.out_indices[0]), self.model_tester.image_size // 2 ** (2 + self.model_tester.out_indices[0]), ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if model_class.__name__ in MODEL_MAPPING_NAMES.values(): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) @slow def test_model_from_pretrained(self): model_name = "OpenGVLab/pvt_v2_b0" model = PvtV2Model.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class PvtV2ModelIntegrationTest(unittest.TestCase): @slow def test_inference_image_classification(self): # only resize + normalize image_processor = AutoImageProcessor.from_pretrained("OpenGVLab/pvt_v2_b0") model = PvtV2ForImageClassification.from_pretrained("OpenGVLab/pvt_v2_b0").to(torch_device).eval() image = prepare_img() encoded_inputs = 
image_processor(images=image, return_tensors="pt") pixel_values = encoded_inputs.pixel_values.to(torch_device) with torch.no_grad(): outputs = model(pixel_values) expected_shape = torch.Size((1, model.config.num_labels)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-1.4192, -1.9158, -0.9702]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow def test_inference_model(self): model = PvtV2Model.from_pretrained("OpenGVLab/pvt_v2_b0").to(torch_device).eval() image_processor = AutoImageProcessor.from_pretrained("OpenGVLab/pvt_v2_b0") image = prepare_img() inputs = image_processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) # forward pass with torch.no_grad(): outputs = model(pixel_values) # verify the logits expected_shape = torch.Size((1, 50, 512)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[-0.3086, 1.0402, 1.1816], [-0.2880, 0.5781, 0.6124], [0.1480, 0.6129, -0.0590]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) @slow @require_accelerate @require_torch_accelerator @require_torch_fp16 def test_inference_fp16(self): r""" A small test to make sure that inference work in half precision without any problem. """ model = PvtV2ForImageClassification.from_pretrained("OpenGVLab/pvt_v2_b0", torch_dtype=torch.float16) model.to(torch_device) image_processor = AutoImageProcessor.from_pretrained("OpenGVLab/pvt_v2_b0") image = prepare_img() inputs = image_processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device, dtype=torch.float16) # forward pass to make sure inference works in fp16 with torch.no_grad(): _ = model(pixel_values) @require_torch class PvtV2BackboneTest(BackboneTesterMixin, unittest.TestCase): all_model_classes = (PvtV2Backbone,) if is_torch_available() else () has_attentions = False config_class = PvtV2Config def test_config(self): config_class = self.config_class # test default config config = config_class() self.assertIsNotNone(config) num_stages = len(config.depths) if hasattr(config, "depths") else config.num_hidden_layers expected_stage_names = [f"stage{idx}" for idx in range(1, num_stages + 1)] self.assertEqual(config.stage_names, expected_stage_names) self.assertTrue(set(config.out_features).issubset(set(config.stage_names))) # Test out_features and out_indices are correctly set # out_features and out_indices both None config = config_class(out_features=None, out_indices=None) self.assertEqual(config.out_features, [config.stage_names[-1]]) self.assertEqual(config.out_indices, [len(config.stage_names) - 1]) # out_features and out_indices both set config = config_class(out_features=["stage1", "stage2"], out_indices=[0, 1]) self.assertEqual(config.out_features, ["stage1", "stage2"]) self.assertEqual(config.out_indices, [0, 1]) # Only out_features set config = config_class(out_features=["stage2", "stage4"]) self.assertEqual(config.out_features, ["stage2", "stage4"]) self.assertEqual(config.out_indices, [1, 3]) # Only out_indices set config = config_class(out_indices=[0, 2]) self.assertEqual(config.out_features, [config.stage_names[0], config.stage_names[2]]) self.assertEqual(config.out_indices, [0, 2]) # Error raised when out_indices do not correspond to out_features with self.assertRaises(ValueError): config = config_class(out_features=["stage1", "stage2"], out_indices=[0, 2]) def 
test_config_save_pretrained(self): config_class = self.config_class config_first = config_class(out_indices=[0, 1, 2, 3]) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(tmpdirname) config_second = self.config_class.from_pretrained(tmpdirname) # Fix issue where type switches in the saving process if isinstance(config_second.image_size, list): config_second.image_size = tuple(config_second.image_size) self.assertEqual(config_second.to_dict(), config_first.to_dict()) def setUp(self): self.model_tester = PvtV2ModelTester(self)
transformers/tests/models/pvt_v2/test_modeling_pvt_v2.py/0
{ "file_path": "transformers/tests/models/pvt_v2/test_modeling_pvt_v2.py", "repo_id": "transformers", "token_count": 7440 }
464
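For quick reference, a minimal sketch of the out_indices/out_features resolution that the PvtV2 backbone config test above exercises; the expected values in the comments are taken directly from that test's assertions (default config with four stages).

from transformers import PvtV2Config

config = PvtV2Config(out_indices=[0, 2])
print(config.stage_names)   # ["stage1", "stage2", "stage3", "stage4"]
print(config.out_features)  # ["stage1", "stage3"]
print(config.out_indices)   # [0, 2]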
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class RagTokenizerTest(TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() self.retrieval_vector_size = 8 # DPR tok vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer") os.makedirs(dpr_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) # BART tok vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer") os.makedirs(bart_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer")) def get_bart_tokenizer(self) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer")) def tearDown(self): shutil.rmtree(self.tmpdirname) @require_tokenizers def test_save_load_pretrained_with_saved_config(self): save_dir = os.path.join(self.tmpdirname, "rag_tokenizer") rag_config = 
RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict()) rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer()) rag_config.save_pretrained(save_dir) rag_tokenizer.save_pretrained(save_dir) new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config) self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab()) self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast) self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab()) @slow def test_pretrained_token_nq_tokenizer(self): tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq") input_strings = [ "who got the first nobel prize in physics", "when is the next deadpool movie being released", "which mode is used for short wave broadcast service", "who is the owner of reading football club", "when is the next scandal episode coming out", "when is the last time the philadelphia won the superbowl", "what is the most current adobe flash player version", "how many episodes are there in dragon ball z", "what is the first step in the evolution of the eye", "where is gall bladder situated in human body", "what is the main mineral in lithium batteries", "who is the president of usa right now", "where do the greasers live in the outsiders", "panda is a national animal of which country", "what is the name of manchester united stadium", ] input_dict = tokenizer(input_strings) self.assertIsNotNone(input_dict) @slow def test_pretrained_sequence_nq_tokenizer(self): tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq") input_strings = [ "who got the first nobel prize in physics", "when is the next deadpool movie being released", "which mode is used for short wave broadcast service", "who is the owner of reading football club", "when is the next scandal episode coming out", "when is the last time the philadelphia won the superbowl", "what is the most current adobe flash player version", "how many episodes are there in dragon ball z", "what is the first step in the evolution of the eye", "where is gall bladder situated in human body", "what is the main mineral in lithium batteries", "who is the president of usa right now", "where do the greasers live in the outsiders", "panda is a national animal of which country", "what is the name of manchester united stadium", ] input_dict = tokenizer(input_strings) self.assertIsNotNone(input_dict)
transformers/tests/models/rag/test_tokenization_rag.py/0
{ "file_path": "transformers/tests/models/rag/test_tokenization_rag.py", "repo_id": "transformers", "token_count": 3143 }
465
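A short sketch of the composite RagConfig construction used in the save/load test above. The expectation that the nested dicts come back as per-component config objects (each with its own model_type) is an assumption about RagConfig internals, not something the test itself asserts.

from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.rag.configuration_rag import RagConfig

rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
print(rag_config.question_encoder.model_type)  # assumed: "dpr"
print(rag_config.generator.model_type)         # assumed: "bart"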
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch ResNet model.""" import unittest from transformers import ResNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ResNetBackbone, ResNetForImageClassification, ResNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class ResNetModelTester: def __init__( self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.embeddings_size = embeddings_size self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.num_labels = num_labels self.scope = scope self.num_stages = len(hidden_sizes) self.out_features = out_features self.out_indices = out_indices def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return ResNetConfig( num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, ) def create_and_check_model(self, config, pixel_values, labels): model = ResNetModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = ResNetForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_backbone(self, config, pixel_values, labels): model = ResNetBackbone(config=config) model.to(torch_device) model.eval() 
result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4]) # verify channels self.parent.assertEqual(len(model.channels), len(config.out_features)) self.parent.assertListEqual(model.channels, config.hidden_sizes[1:]) # verify backbone works with out_features=None config.out_features = None model = ResNetBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1]) # verify channels self.parent.assertEqual(len(model.channels), 1) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]]) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class ResNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as ResNet does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = ( ( ResNetModel, ResNetForImageClassification, ResNetBackbone, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"image-feature-extraction": ResNetModel, "image-classification": ResNetForImageClassification} if is_torch_available() else {} ) fx_compatible = True test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = ResNetModelTester(self) self.config_tester = ConfigTester( self, config_class=ResNetConfig, has_text_modality=False, common_properties=["num_channels", "hidden_sizes"], ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="ResNet does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="ResNet does not support input and output embeddings") def test_model_get_set_embeddings(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config=config) for name, module in model.named_modules(): if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)): self.assertTrue( torch.all(module.weight == 1), msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) self.assertTrue( torch.all(module.bias == 0), msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_stages = self.model_tester.num_stages self.assertEqual(len(hidden_states), 
expected_num_stages + 1) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() layers_type = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: config.layer_type = layer_type inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @unittest.skip(reason="ResNet does not use feedforward chunking") def test_feed_forward_chunking(self): pass def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "microsoft/resnet-50" model = ResNetModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class ResNetModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained("microsoft/resnet-50") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-11.1069, -9.7877, -8.3777]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @require_torch class ResNetBackboneTest(BackboneTesterMixin, unittest.TestCase): all_model_classes = (ResNetBackbone,) if is_torch_available() else () has_attentions = False config_class = ResNetConfig def setUp(self): self.model_tester = ResNetModelTester(self)
transformers/tests/models/resnet/test_modeling_resnet.py/0
{ "file_path": "transformers/tests/models/resnet/test_modeling_resnet.py", "repo_id": "transformers", "token_count": 4893 }
466
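The spatial sizes asserted in the ResNet tests above follow the usual ResNet downsampling schedule; the tiny sketch below spells out that arithmetic. The per-stage cumulative strides are an assumption consistent with those assertions (stage2 of a 32x32 input giving a 4x4 feature map, the last stage giving image_size // 32).

image_size = 32  # the value used by ResNetModelTester above
stage_strides = {"stage1": 4, "stage2": 8, "stage3": 16, "stage4": 32}  # assumed cumulative strides
print({name: image_size // stride for name, stride in stage_strides.items()})
# {'stage1': 8, 'stage2': 4, 'stage3': 2, 'stage4': 1}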
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch RoFormer model.""" import unittest from transformers import RoFormerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerModel, ) from transformers.models.roformer.modeling_roformer import ( RoFormerSelfAttention, RoFormerSinusoidalPositionalEmbedding, ) class RoFormerModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return 
RoFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = RoFormerModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = RoFormerModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = RoFormerForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_generate_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = RoFormerForCausalLM(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=15, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=15, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = RoFormerForMaskedLM(config=config) model.to(torch_device) model.eval() 
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = RoFormerForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next tokens and extend to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and attention_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = RoFormerForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = RoFormerForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = RoFormerForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length,
self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = RoFormerForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class RoFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( RoFormerModel, RoFormerForMaskedLM, RoFormerForCausalLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, ) if is_torch_available() else () ) all_generative_model_classes = (RoFormerForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": RoFormerModel, "fill-mask": RoFormerForMaskedLM, "question-answering": RoFormerForQuestionAnswering, "text-classification": RoFormerForSequenceClassification, "text-generation": RoFormerForCausalLM, "token-classification": RoFormerForTokenClassification, "zero-shot": RoFormerForSequenceClassification, } if is_torch_available() else {} ) def setUp(self): self.model_tester = RoFormerModelTester(self) self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_generate_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_generate_causal_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) @slow def test_model_from_pretrained(self): model_name = "junnyu/roformer_chinese_small" model = RoFormerModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @require_torch class RoFormerModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = RoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base") input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) with torch.no_grad(): output = model(input_ids)[0] # TODO Replace vocab size vocab_size = 50000 expected_shape = torch.Size((1, 6, vocab_size)) self.assertEqual(output.shape, expected_shape) # TODO Replace values below with what was printed above.
expected_slice = torch.tensor( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) @require_torch class RoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase): tolerance = 1e-4 def test_basic(self): input_ids = torch.tensor([[4, 10]], dtype=torch.long, device=torch_device) emb1 = RoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6).to(torch_device) emb = emb1(input_ids.shape) desired_weights = torch.tensor( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ).to(torch_device) self.assertTrue( torch.allclose(emb, desired_weights, atol=self.tolerance), msg=f"\nexp:\n{desired_weights}\ngot:\n{emb[0]}\n", ) def test_positional_emb_weights_against_roformer(self): desired_weights = torch.tensor( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ).to(torch_device) emb1 = RoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512).to(torch_device) weights = emb1.weight.data[:3, :5].to(torch_device) self.assertTrue( torch.allclose(weights, desired_weights, atol=self.tolerance), msg=f"\nexp:\n{desired_weights}\ngot:\n{weights}\n", ) @require_torch class RoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase): tolerance = 1e-4 def test_apply_rotary_position_embeddings(self): # 2,12,16,64 query_layer = ( torch.arange(2 * 12 * 16 * 64, dtype=torch.float, device=torch_device).reshape(2, 12, 16, 64) / 100 ).to(torch_device) key_layer = ( -torch.arange(2 * 12 * 16 * 64, dtype=torch.float, device=torch_device).reshape(2, 12, 16, 64) / 100 ).to(torch_device) embed_positions = RoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64).to(torch_device) sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :] query_layer, key_layer = RoFormerSelfAttention.apply_rotary_position_embeddings( sinusoidal_pos, query_layer, key_layer ) desired_query_layer = torch.tensor( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ).to(torch_device) desired_key_layer = torch.tensor( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ).to(torch_device) self.assertTrue( torch.allclose(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance), msg=f"\nexp:\n{desired_query_layer}\ngot:\n{query_layer}\n", ) self.assertTrue( torch.allclose(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance), msg=f"\nexp:\n{desired_key_layer}\ngot:\n{key_layer}\n", )
transformers/tests/models/roformer/test_modeling_roformer.py/0
{ "file_path": "transformers/tests/models/roformer/test_modeling_roformer.py", "repo_id": "transformers", "token_count": 11336 }
467
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest from transformers import SeamlessM4TFeatureExtractor, SeamlessM4TProcessor from transformers.models.seamless_m4t import ( SeamlessM4TTokenizer, SeamlessM4TTokenizerFast, ) from transformers.testing_utils import require_torch from .test_feature_extraction_seamless_m4t import floats_list @require_torch class SeamlessM4TProcessorTest(unittest.TestCase): def setUp(self): self.checkpoint = "facebook/hf-seamless-m4t-medium" self.tmpdirname = tempfile.mkdtemp() def get_tokenizer(self, **kwargs): return SeamlessM4TTokenizer.from_pretrained(self.checkpoint, **kwargs) def get_feature_extractor(self, **kwargs): return SeamlessM4TFeatureExtractor.from_pretrained(self.checkpoint, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() feature_extractor = self.get_feature_extractor() processor = SeamlessM4TProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) processor.save_pretrained(self.tmpdirname) processor = SeamlessM4TProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) tokenizer_instance = isinstance(processor.tokenizer, SeamlessM4TTokenizerFast) or isinstance( processor.tokenizer, SeamlessM4TTokenizer ) self.assertTrue(tokenizer_instance) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertIsInstance(processor.feature_extractor, SeamlessM4TFeatureExtractor) def test_save_load_pretrained_additional_features(self): processor = SeamlessM4TProcessor( tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0) processor = SeamlessM4TProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor, SeamlessM4TFeatureExtractor) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) tokenizer_instance = isinstance(processor.tokenizer, SeamlessM4TTokenizerFast) or isinstance( processor.tokenizer, SeamlessM4TTokenizer ) self.assertTrue(tokenizer_instance) # Copied from test.models.whisper.test_processor_whisper.WhisperProcessorTest.test_feature_extractor with Whisper->SeamlessM4T def test_feature_extractor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SeamlessM4TProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(raw_speech, 
return_tensors="np") input_processor = processor(audios=raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) # Copied from test.models.whisper.test_processor_whisper.WhisperProcessorTest.test_tokenizer with Whisper->SeamlessM4T def test_tokenizer(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SeamlessM4TProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "This is a test string" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) # Copied from test.models.whisper.test_processor_whisper.WhisperProcessorTest.test_tokenizer_decode with Whisper->SeamlessM4T def test_tokenizer_decode(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SeamlessM4TProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor)
transformers/tests/models/seamless_m4t/test_processor_seamless_m4t.py/0
{ "file_path": "transformers/tests/models/seamless_m4t/test_processor_seamless_m4t.py", "repo_id": "transformers", "token_count": 2078 }
468
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Table Transformer model.""" import inspect import math import unittest from huggingface_hub import hf_hub_download from transformers import ResNetConfig, TableTransformerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_timm, require_torch, require_vision, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import TableTransformerForObjectDetection, TableTransformerModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class TableTransformerModelTester: def __init__( self, parent, batch_size=8, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=8, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_queries=12, num_channels=3, min_size=200, max_size=200, n_targets=8, num_labels=3, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_queries = num_queries self.num_channels = num_channels self.min_size = min_size self.max_size = max_size self.n_targets = n_targets self.num_labels = num_labels # we also set the expected seq length for both encoder and decoder self.encoder_seq_length = math.ceil(self.min_size / 32) * math.ceil(self.max_size / 32) self.decoder_seq_length = self.num_queries def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]) pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device) labels = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) labels = [] for i in range(self.batch_size): target = {} target["class_labels"] = torch.randint( high=self.num_labels, size=(self.n_targets,), device=torch_device ) target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device) target["masks"] = torch.rand(self.n_targets, self.min_size, self.max_size, device=torch_device) labels.append(target) config = self.get_config() return config, pixel_values, pixel_mask, labels def get_config(self): resnet_config = ResNetConfig( num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], hidden_act="relu", num_labels=3, 
out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], ) return TableTransformerConfig( d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, num_queries=self.num_queries, num_labels=self.num_labels, use_timm_backbone=False, backbone_config=resnet_config, backbone=None, use_pretrained_backbone=False, ) def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def create_and_check_table_transformer_model(self, config, pixel_values, pixel_mask, labels): model = TableTransformerModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.decoder_seq_length, self.hidden_size) ) def create_and_check_table_transformer_object_detection_head_model(self, config, pixel_values, pixel_mask, labels): model = TableTransformerForObjectDetection(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) def create_and_check_table_transformer_no_timm_backbone(self, config, pixel_values, pixel_mask, labels): config.use_timm_backbone = False config.backbone_config = ResNetConfig() model = TableTransformerForObjectDetection(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) @require_torch class TableTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TableTransformerModel, TableTransformerForObjectDetection, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"image-feature-extraction": TableTransformerModel, "object-detection": TableTransformerForObjectDetection} if is_torch_available() else {} ) is_encoder_decoder = True test_torchscript = False test_pruning = False test_head_masking = False test_missing_keys = False zero_init_hidden_state = True # special case for head models def 
_prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ in ["TableTransformerForObjectDetection"]: labels = [] for i in range(self.model_tester.batch_size): target = {} target["class_labels"] = torch.ones( size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long ) target["boxes"] = torch.ones( self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float ) target["masks"] = torch.ones( self.model_tester.n_targets, self.model_tester.min_size, self.model_tester.max_size, device=torch_device, dtype=torch.float, ) labels.append(target) inputs_dict["labels"] = labels return inputs_dict def setUp(self): self.model_tester = TableTransformerModelTester(self) self.config_tester = ConfigTester(self, config_class=TableTransformerConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_table_transformer_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_table_transformer_model(*config_and_inputs) def test_table_transformer_object_detection_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_table_transformer_object_detection_head_model(*config_and_inputs) def test_table_transformer_no_timm_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_table_transformer_no_timm_backbone(*config_and_inputs) @unittest.skip(reason="Table Transformer does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Table Transformer does not use inputs_embeds") def test_inputs_embeds_matches_input_ids(self): pass @unittest.skip(reason="Table Transformer does not have a get_input_embeddings method") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="Table Transformer is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="Table Transformer does not use token embeddings") def test_resize_tokens_embeddings(self): pass @slow @unittest.skip(reason="TODO Niels: fix me!") def test_model_outputs_equivalence(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True decoder_seq_length = self.model_tester.decoder_seq_length encoder_seq_length = self.model_tester.encoder_seq_length decoder_key_length = self.model_tester.decoder_seq_length encoder_key_length = self.model_tester.encoder_seq_length for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions 
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Object Detection model returns pred_logits and pred_boxes if model_class.__name__ == "TableTransformerForObjectDetection": correct_outlen += 2 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_retain_grad_hidden_states_attentions(self): # removed retain_grad and grad on decoder_hidden_states, as queries don't require grad config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_attentions = outputs.encoder_attentions[0] encoder_hidden_states.retain_grad() encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) def test_forward_auxiliary_loss(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.auxiliary_loss = True # only test for object detection and segmentation 
model for model_class in self.all_model_classes[1:]: model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) outputs = model(**inputs) self.assertIsNotNone(outputs.auxiliary_outputs) self.assertEqual(len(outputs.auxiliary_outputs), self.model_tester.num_hidden_layers - 1) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = ["pixel_values", "pixel_mask"] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "encoder_outputs"] if "head_mask" in arg_names and "decoder_head_mask" in arg_names else [] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["pixel_values", "pixel_mask"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_different_timm_backbone(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # let's pick a random timm backbone config.backbone = "tf_mobilenetv3_small_075" config.backbone_config = None config.use_timm_backbone = True config.backbone_kwargs = {"out_indices": [2, 3, 4]} for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "TableTransformerForObjectDetection": expected_shape = ( self.model_tester.batch_size, self.model_tester.num_queries, self.model_tester.num_labels + 1, ) self.assertEqual(outputs.logits.shape, expected_shape) # Confirm out_indices was propagated to backbone self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 3) else: # Confirm out_indices was propagated to backbone self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 3) self.assertTrue(outputs) def test_hf_backbone(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Load a pretrained HF checkpoint as backbone config.backbone = "microsoft/resnet-18" config.backbone_config = None config.use_timm_backbone = False config.use_pretrained_backbone = True config.backbone_kwargs = {"out_indices": [2, 3, 4]} for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "TableTransformerForObjectDetection": expected_shape = ( self.model_tester.batch_size, self.model_tester.num_queries, self.model_tester.num_labels + 1, ) self.assertEqual(outputs.logits.shape, expected_shape) # Confirm out_indices was propagated to backbone self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 3) else: # Confirm out_indices was propagated to backbone self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 3) self.assertTrue(outputs) def test_greyscale_images(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # use greyscale pixel values inputs_dict["pixel_values"] = floats_tensor( [self.model_tester.batch_size, 1, self.model_tester.min_size, self.model_tester.max_size] ) # let's set num_channels to 1
config.num_channels = 1 config.backbone_config.num_channels = 1 for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertTrue(outputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) configs_no_init.init_xavier_std = 1e9 for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if "bbox_attention" in name and "bias" not in name: self.assertLess( 100000, abs(param.data.max().item()), msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) TOLERANCE = 1e-4 # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_timm @require_vision @slow class TableTransformerModelIntegrationTests(unittest.TestCase): def test_table_detection(self): image_processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-detection") model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-detection") model.to(torch_device) file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename="example_pdf.png") image = Image.open(file_path).convert("RGB") inputs = image_processor(image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) expected_shape = (1, 15, 3) self.assertEqual(outputs.logits.shape, expected_shape) expected_logits = torch.tensor( [[-6.7329, -16.9590, 6.7447], [-8.0038, -22.3071, 6.9288], [-7.2445, -20.9855, 7.3465]], device=torch_device, ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)) expected_boxes = torch.tensor( [[0.4868, 0.1764, 0.6729], [0.6674, 0.4621, 0.3864], [0.4720, 0.1757, 0.6362]], device=torch_device ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-3))
transformers/tests/models/table_transformer/test_modeling_table_transformer.py/0
{ "file_path": "transformers/tests/models/table_transformer/test_modeling_table_transformer.py", "repo_id": "transformers", "token_count": 11586 }
469
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from parameterized import parameterized from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VideoLlavaImageProcessor class VideoLlavaImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=5, num_channels=3, image_size=18, min_resolution=30, max_resolution=80, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD, do_convert_rgb=True, ): super().__init__() size = size if size is not None else {"shortest_edge": 20} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTester.expected_output_image_shape def expected_output_image_shape(self, images): return self.num_channels, self.crop_size["height"], self.crop_size["width"] # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTester.prepare_image_inputs def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) def prepare_video_inputs(self, equal_resolution=False, numpify=False, torchify=False): images = prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) # let's simply copy the frames to fake a long video-clip if numpify or torchify: videos = [] for image in images: if numpify: video = image[None, ...].repeat(8, 0) else: video = image[None, ...].repeat(8, 1, 1, 1) videos.append(video) else: videos = [] 
for pil_image in images: videos.append([pil_image] * 8) return videos @require_torch @require_vision class VideoLlavaImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = VideoLlavaImageProcessor if is_vision_available() else None # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.setUp with CLIP->VideoLlava def setUp(self): super().setUp() self.image_processor_tester = VideoLlavaImageProcessingTester(self) @property # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.image_processor_dict def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "center_crop")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.test_image_processor_from_dict_with_kwargs def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 20}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values_images expected_output_image_shape = (1, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values_images expected_output_image_shape = (5, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(images=image_inputs[0], return_tensors="pt").pixel_values_images expected_output_image_shape = (1, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(images=image_inputs, return_tensors="pt").pixel_values_images expected_output_image_shape = (5, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_call_numpy_videos(self): # Initialize image_processing 
image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors video_inputs = self.image_processor_tester.prepare_video_inputs(numpify=True, equal_resolution=True) for video in video_inputs: self.assertIsInstance(video, np.ndarray) # Test not batched input encoded_videos = image_processing(images=None, videos=video_inputs[0], return_tensors="pt").pixel_values_videos expected_output_video_shape = (1, 8, 3, 18, 18) self.assertEqual(tuple(encoded_videos.shape), expected_output_video_shape) # Test batched encoded_videos = image_processing(images=None, videos=video_inputs, return_tensors="pt").pixel_values_videos expected_output_video_shape = (5, 8, 3, 18, 18) self.assertEqual(tuple(encoded_videos.shape), expected_output_video_shape) def test_call_pil_videos(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # the inputs come in list of lists batched format video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=True) for video in video_inputs: self.assertIsInstance(video[0], Image.Image) # Test not batched input encoded_videos = image_processing(images=None, videos=video_inputs[0], return_tensors="pt").pixel_values_videos expected_output_video_shape = (1, 8, 3, 18, 18) self.assertEqual(tuple(encoded_videos.shape), expected_output_video_shape) # Test batched encoded_videos = image_processing(images=None, videos=video_inputs, return_tensors="pt").pixel_values_videos expected_output_video_shape = (5, 8, 3, 18, 18) self.assertEqual(tuple(encoded_videos.shape), expected_output_video_shape) def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values_images expected_output_image_shape = (1, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values_images expected_output_image_shape = (5, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_call_pytorch_videos(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=True, torchify=True) for video in video_inputs: self.assertIsInstance(video, torch.Tensor) # Test not batched input encoded_videos = image_processing(images=None, videos=video_inputs[0], return_tensors="pt").pixel_values_videos expected_output_video_shape = (1, 8, 3, 18, 18) self.assertEqual(tuple(encoded_videos.shape), expected_output_video_shape) # Test batched encoded_videos = image_processing(images=None, videos=video_inputs, return_tensors="pt").pixel_values_videos expected_output_video_shape = (5, 8, 3, 18, 18) self.assertEqual(tuple(encoded_videos.shape), expected_output_video_shape) @parameterized.expand([(True, False), (False, True)]) def test_call_mixed(self, numpify, torchify): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = 
self.image_processor_tester.prepare_image_inputs( equal_resolution=True, numpify=numpify, torchify=torchify ) video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=True, torchify=torchify) # Test not batched input encoded = image_processing(images=image_inputs[0], videos=video_inputs[0], return_tensors="pt") expected_output_video_shape = (1, 8, 3, 18, 18) expected_output_image_shape = (1, 3, 18, 18) self.assertEqual(tuple(encoded.pixel_values_videos.shape), expected_output_video_shape) self.assertEqual(tuple(encoded.pixel_values_images.shape), expected_output_image_shape) # Test batched encoded = image_processing(images=image_inputs, videos=video_inputs, return_tensors="pt") expected_output_video_shape = (5, 8, 3, 18, 18) expected_output_image_shape = (5, 3, 18, 18) self.assertEqual(tuple(encoded.pixel_values_videos.shape), expected_output_video_shape) self.assertEqual(tuple(encoded.pixel_values_images.shape), expected_output_image_shape) def test_call_numpy_4_channels(self): # Test that can process images which have an arbitrary number of channels # Initialize image_processing image_processor = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors self.image_processor_tester.num_channels = 4 image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) # Test not batched input encoded_images = image_processor( image_inputs[0], return_tensors="pt", input_data_format="channels_last", image_mean=0, image_std=1, ).pixel_values_images expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test batched encoded_images = image_processor( image_inputs, return_tensors="pt", input_data_format="channels_last", image_mean=0, image_std=1, ).pixel_values_images expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape) )
transformers/tests/models/video_llava/test_image_processing_video_llava.py/0
{ "file_path": "transformers/tests/models/video_llava/test_image_processing_video_llava.py", "repo_id": "transformers", "token_count": 6116 }
470
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch VisionTextDualEncoder model.""" import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image # Inspired by # https://github.com/rwightman/pytorch-image-models/blob/b9bd960a032c75ca6b808ddeed76bee5f3ed4972/timm/models/layers/helpers.py # From PyTorch internals def to_2tuple(x): if isinstance(x, collections.abc.Iterable): return x return (x, x) @require_flax class VisionTextDualEncoderMixin: def get_vision_text_model(self, config, text_config): pass def prepare_config_and_inputs(self): pass def get_pretrained_model_and_inputs(self): pass def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") def check_model_from_pretrained_configs( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config) model = FlaxVisionTextDualEncoderModel(config) output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim)) def check_vision_text_dual_encoder_from_pretrained( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) kwargs = {"vision_model": vision_model, "text_model": text_model} model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs) output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], 
model.config.projection_dim)) def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) kwargs = {"vision_model": vision_model, "text_model": text_model} model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs) output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) out_1 = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname) after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) out_2 = after_output[0] max_diff = np.amax(np.abs(out_2 - out_1)) self.assertLessEqual(max_diff, 1e-3) def check_vision_text_output_attention( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) kwargs = {"vision_model": vision_model, "text_model": text_model} model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs) output = model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True ) vision_attentions = output.vision_model_output.attentions self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) image_size = to_2tuple(vision_model.config.image_size) patch_size = to_2tuple(vision_model.config.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len)) text_attentions = output.text_model_output.attentions self.assertEqual(len(text_attentions), text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict): pt_model.to(torch_device) pt_model.eval() # prepare inputs flax_inputs = inputs_dict pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple() self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual(len(fx_outputs), 
len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]): self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2) def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict): config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config) pt_model = VisionTextDualEncoderModel(config) fx_model = FlaxVisionTextDualEncoderModel(config) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict) def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict): config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config) pt_model = VisionTextDualEncoderModel(config) fx_model = FlaxVisionTextDualEncoderModel(config) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict) def test_model_from_pretrained_configs(self): inputs_dict = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**inputs_dict) def test_vision_text_dual_encoder_from_pretrained(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict) def test_save_load(self): inputs_dict = self.prepare_config_and_inputs() self.check_save_load(**inputs_dict) def test_vision_text_output_attention(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**inputs_dict) @is_pt_flax_cross_test def test_pt_flax_equivalence(self): config_inputs_dict = self.prepare_config_and_inputs() vision_config = config_inputs_dict.pop("vision_config") text_config = config_inputs_dict.pop("text_config") inputs_dict = config_inputs_dict self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict) self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict) @slow def test_real_model_save_load_from_pretrained(self): model_2, inputs = self.get_pretrained_model_and_inputs() outputs = model_2(**inputs) out_2 = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname) after_outputs = model_1(**inputs) out_1 = after_outputs[0] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @require_flax class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-bert", vision_from_pt=True, text_from_pt=True, ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def get_vision_text_model(self, vision_config, text_config): vision_model = FlaxViTModel(vision_config) text_model = FlaxBertModel(text_config) return vision_model, text_model def prepare_config_and_inputs(self): vit_model_tester = FlaxViTModelTester(self) bert_model_tester = FlaxBertModelTester(self) 
vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values = vision_config_and_inputs text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-clip", "hf-internal-testing/tiny-bert", vision_from_pt=True, text_from_pt=True, ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def get_vision_text_model(self, vision_config, text_config): vision_model = FlaxCLIPVisionModel(vision_config) text_model = FlaxBertModel(text_config) return vision_model, text_model def prepare_config_and_inputs(self): clip_model_tester = FlaxCLIPVisionModelTester(self) bert_model_tester = FlaxBertModelTester(self) vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values = vision_config_and_inputs text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase): @slow def test_inference(self): model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0) processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian") image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor( text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np" ) outputs = model(**inputs) # verify the logits self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0])) self.assertEqual( outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), ) expected_logits = np.array([[1.2284727, 0.3104122]]) self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
transformers/tests/models/vision_text_dual_encoder/test_modeling_flax_vision_text_dual_encoder.py/0
{ "file_path": "transformers/tests/models/vision_text_dual_encoder/test_modeling_flax_vision_text_dual_encoder.py", "repo_id": "transformers", "token_count": 6782 }
471
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import shutil
import tempfile
import unittest

from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor, Wav2Vec2Processor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.utils import FEATURE_EXTRACTOR_NAME

from .test_feature_extraction_wav2vec2 import floats_list


class Wav2Vec2ProcessorTest(unittest.TestCase):
    def setUp(self):
        vocab = "<pad> <s> </s> <unk> | E T A O N I H S R D L U M W C F G Y P B V K ' X J Q Z".split(" ")
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "pad_token": "<pad>",
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()

        processor = Wav2Vec2Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2Processor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2Processor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = Wav2Vec2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = Wav2Vec2Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = Wav2Vec2Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = Wav2Vec2Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = Wav2Vec2Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
transformers/tests/models/wav2vec2/test_processor_wav2vec2.py/0
{ "file_path": "transformers/tests/models/wav2vec2/test_processor_wav2vec2.py", "repo_id": "transformers", "token_count": 2550 }
472
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the TensorFlow Whisper model.""" from __future__ import annotations import inspect import tempfile import traceback import unittest import numpy as np from transformers import WhisperConfig, WhisperFeatureExtractor, WhisperProcessor from transformers.testing_utils import is_tf_available, require_tf, require_tokenizers, run_test_in_subprocess, slow from transformers.utils import cached_property from transformers.utils.import_utils import is_datasets_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_datasets_available(): import datasets from datasets import load_dataset if is_tf_available(): import tensorflow as tf from transformers import TFWhisperForConditionalGeneration, TFWhisperModel, set_seed from transformers.models.whisper.modeling_tf_whisper import ( TFWhisperDecoder, TFWhisperEncoder, sinusoidal_embedding_init, ) def prepare_whisper_inputs_dict( config, input_features, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if decoder_attention_mask is None: decoder_attention_mask = tf.where(decoder_input_ids != config.pad_token_id, 1, 0) if head_mask is None: head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_features": input_features, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class TFWhisperModelTester: def __init__( self, parent, batch_size=13, seq_length=60, is_training=True, use_labels=False, vocab_size=200, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, input_channels=1, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, max_source_positions=30, max_target_positions=60, bos_token_id=98, eos_token_id=98, pad_token_id=0, num_mel_bins=80, decoder_start_token_id=85, num_conv_layers=1, suppress_tokens=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_mel_bins = num_mel_bins 
self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.max_target_positions = max_target_positions self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.decoder_start_token_id = decoder_start_token_id self.num_conv_layers = num_conv_layers self.suppress_tokens = suppress_tokens def prepare_config_and_inputs(self): input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() inputs_dict = prepare_whisper_inputs_dict( config, attention_mask=None, input_features=input_features, decoder_input_ids=decoder_input_ids, ) return config, inputs_dict def get_config(self): return WhisperConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, input_channels=self.input_channels, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, max_source_positions=self.max_source_positions, max_target_positions=self.max_target_positions, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_ffn_dim=self.hidden_size, encoder_ffn_dim=self.hidden_size, decoder_start_token_id=self.decoder_start_token_id, suppress_tokens=self.suppress_tokens, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_subsampled_output_lengths(self, input_lengths): """ Computes the output length of the convolutional layers """ for i in range(self.num_conv_layers): input_lengths = (input_lengths - 1) // 2 + 1 return input_lengths def create_and_check_model_forward(self, config, inputs_dict): model = TFWhisperModel(config=config) input_features = inputs_dict["input_features"] decoder_input_ids = inputs_dict["decoder_input_ids"] # first forward pass last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state self.parent.assertTrue(last_hidden_state.shape, (13, 7, 16)) def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFWhisperModel(config=config).get_decoder() # take a slice so we're shorter than the seqeuence length and can append later input_ids = inputs_dict["decoder_input_ids"][:, :-10] attention_mask = inputs_dict["decoder_attention_mask"][:, :-10] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_token = ids_tensor((self.batch_size, 3), config.vocab_size) next_tokens = tf.where(next_token <= 2, 2, next_token) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = np.random.randint(0, output_from_past.shape[-1]) 
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(np.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = TFWhisperModel(config=config) outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = TFWhisperEncoder.from_pretrained(tmpdirname) encoder_last_hidden_state_2 = encoder(inputs_dict["input_features"])[0] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = TFWhisperDecoder.from_pretrained(tmpdirname) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max() < 1e-3) @require_tf class TFWhisperModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFWhisperModel, TFWhisperForConditionalGeneration) if is_tf_available() else () all_generative_model_classes = (TFWhisperForConditionalGeneration,) if is_tf_available() else () pipeline_model_mapping = {"feature-extraction": TFWhisperModel} if is_tf_available() else {} is_encoder_decoder = True fx_compatible = False test_pruning = False test_missing_keys = False test_onnx = False input_name = "input_features" # TODO (ydshieh): undo skip once a fix is done on TF side. 
@unittest.skip("Skip for now as TF 2.13 breaks it on GPU") def test_xla_generate_slow(self): super().test_xla_generate_slow() def setUp(self): self.model_tester = TFWhisperModelTester(self) self.config_tester = ConfigTester(self, config_class=WhisperConfig) self.maxDiff = 3000 def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) model.build_in_name_scope() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=False) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_requires_grad_encoder_embed_positions(self): config = self.model_tester.get_config() for model_class in self.all_model_classes: model = model_class(config) encoder = model.get_encoder() self.assertFalse(encoder.embed_positions.trainable) def test_encoder_sinusoidal_embed_positions(self): config = self.model_tester.get_config() for model_class in self.all_model_classes: model = model_class(config) model.build_in_name_scope() embeds = model.get_encoder().embed_positions.get_weights()[0] sinusoids = sinusoidal_embedding_init(embeds.shape).numpy() self.assertTrue(np.allclose(embeds, sinusoids)) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def _get_input_ids_and_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict[self.input_name] # cut to half length & take max batch_size 3 max_batch_size = 3 input_ids = input_ids[:max_batch_size, :, :] # generate max 3 tokens max_length = 4 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` config.pad_token_id = config.eos_token_id return config, input_ids, None, max_length # not implemented currently def test_inputs_embeds(self): pass @unittest.skip("Training is not yet supported") def test_training(self): pass def test_generate_with_head_masking(self): pass @unittest.skip("fp16 is not yet supported for TF models") def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() config.max_target_positions = 400 input_features = input_dict["input_features"] model = TFWhisperForConditionalGeneration(config) model.generate(input_features) model.generate(input_features, num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_features", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["decoder_position_ids", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) 
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length subsampled_seq_length = model._get_feat_extract_output_lengths(seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [subsampled_seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None): # We override with a slightly higher tol value, as test recently became flaky super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) decoder_key_length = getattr(self.model_tester, "decoder_key_length", encoder_key_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) subsampled_encoder_seq_length = model._get_feat_extract_output_lengths(encoder_seq_length) subsampled_encoder_key_length = model._get_feat_extract_output_lengths(encoder_key_length) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), 
[self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) out_len = len(outputs) correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, subsampled_encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) def test_generate_without_input_ids(self): pass @staticmethod def _get_encoder_outputs( model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 ): encoder = model.get_encoder() encoder_outputs = encoder( input_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( num_interleave, dim=0 ) input_ids = input_ids[:, :, 0] input_ids = tf.zeros_like(input_ids[:, :1], dtype=tf.int64) + tf.convert_to_tensor( [model._get_decoder_start_token_id()] ) attention_mask = None return encoder_outputs, input_ids, attention_mask def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): batch_size, mel, seq_length = input_ids.shape subsampled_seq_length = self.model_tester.get_subsampled_output_lengths(seq_length) num_sequences_in_output = batch_size * num_return_sequences gen_len = ( output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length ) # scores self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config) # Attentions # encoder self._check_encoder_attention_for_generate( output.encoder_attentions, batch_size, config, subsampled_seq_length ) # decoder self._check_attentions_for_generate( num_sequences_in_output, output.decoder_attentions, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # Hidden States # encoder self._check_encoder_hidden_states_for_generate( output.encoder_hidden_states, batch_size, config, subsampled_seq_length ) # decoder self._check_hidden_states_for_generate( num_sequences_in_output, output.decoder_hidden_states, min_length=1, 
max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # overwritten from parent due to the inability to work when non-text inputs are not passed AND because the input is # `input_features` def test_lm_head_model_random_no_beam_search_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_features = inputs_dict.get("input_features", None) # iterate over all generative models for model_class in self.all_generative_model_classes: model = model_class(config) if config.bos_token_id is None: # if bos token id is not defined model needs input_features with self.assertRaises(AssertionError): model.generate(do_sample=True, max_length=5) # num_return_sequences = 1 self._check_generated_ids(model.generate(input_features, do_sample=True)) with self.assertRaises(ValueError): # generating multiple sequences when no beam search generation # is not allowed as it would always generate the same sequences model.generate(input_features, do_sample=False, num_return_sequences=2) # num_return_sequences > 1, sample self._check_generated_ids(model.generate(input_features, do_sample=True, num_return_sequences=2)) # check bad words tokens language generation # create list of 1-seq bad token and list of 2-seq of bad tokens bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] output_tokens = model.generate( input_features, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2 ) # only count generated tokens generated_ids = output_tokens[:, input_features.shape[-1] :] self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) # overwritten from parent due to the inability to work when non-text inputs are not passed AND because the input is # `input_features` def test_lm_head_model_random_beam_search_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_features = inputs_dict.get("input_features", None) for model_class in self.all_generative_model_classes: model = model_class(config) if config.bos_token_id is None: # if bos token id is not defined model needs input_ids, num_return_sequences = 1 self._check_generated_ids(model.generate(input_features, do_sample=True, num_beams=2)) with self.assertRaises(ValueError): # generating more sequences than having beams leads is not possible model.generate(input_features, do_sample=False, num_return_sequences=3, num_beams=2) # num_return_sequences > 1, sample self._check_generated_ids( model.generate( input_features, do_sample=True, num_beams=2, num_return_sequences=2, ) ) # num_return_sequences > 1, greedy self._check_generated_ids( model.generate(input_features, do_sample=False, num_beams=2, num_return_sequences=2) ) # check bad words tokens language generation # create list of 1-seq bad token and list of 2-seq of bad tokens bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] output_tokens = model.generate( input_features, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2 ) # only count generated tokens generated_ids = output_tokens[:, input_features.shape[-1] :] self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) def test_generate_with_prompt_ids_and_task_and_language(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() model = TFWhisperForConditionalGeneration(config) input_features = input_dict["input_features"] 
prompt_ids = np.arange(5) language = "<|de|>" task = "translate" lang_id = 6 task_id = 7 model.generation_config.__setattr__("lang_to_id", {language: lang_id}) model.generation_config.__setattr__("task_to_id", {task: task_id}) output = model.generate(input_features, max_new_tokens=5, task=task, language=language, prompt_ids=prompt_ids) expected_output_start = [ *prompt_ids.tolist(), model.generation_config.decoder_start_token_id, lang_id, task_id, ] for row in output.numpy().tolist(): self.assertListEqual(row[: len(expected_output_start)], expected_output_start) def test_generate_with_prompt_ids_and_forced_decoder_ids(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() model = TFWhisperForConditionalGeneration(config) input_features = input_dict["input_features"] prompt_ids = np.asarray(range(5)) forced_decoder_ids = [(1, 6), (2, 7), (3, 8)] output = model.generate( input_features, max_new_tokens=5, forced_decoder_ids=forced_decoder_ids, prompt_ids=prompt_ids ) expected_output_start = [ *prompt_ids.tolist(), model.generation_config.decoder_start_token_id, *[token for _rank, token in forced_decoder_ids], ] for row in output.numpy().tolist(): self.assertListEqual(row[: len(expected_output_start)], expected_output_start) def _load_datasamples(num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _test_large_logits_librispeech(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) set_seed(0) model = TFWhisperModel.from_pretrained("openai/whisper-large") input_speech = _load_datasamples(1) processor = WhisperProcessor.from_pretrained("openai/whisper-large") processed_inputs = processor( audio=input_speech, text="This part of the speech", add_special_tokens=False, return_tensors="tf" ) input_features = processed_inputs.input_features decoder_input_ids = processed_inputs.labels logits = model( input_features, decoder_input_ids=decoder_input_ids, output_hidden_states=False, output_attentions=False, use_cache=False, ) logits = logits.last_hidden_state @ tf.transpose(model.model.decoder.embed_tokens.weights[0]) # fmt: off EXPECTED_LOGITS = tf.convert_to_tensor( [ 2.1382, 0.9381, 4.4671, 3.5589, 2.4022, 3.8576, -0.6521, 2.5472, 1.8301, 1.9957, 2.3432, 1.4678, 0.5459, 2.2597, 1.5179, 2.5357, 1.1624, 0.6194, 1.0757, 1.8259, 2.4076, 1.6601, 2.3503, 1.3376, 1.9891, 1.8635, 3.8931, 5.3699, 4.4772, 3.9184 ] ) # fmt: on unittest.TestCase().assertTrue(np.allclose(logits[0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() def _test_large_generation(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-large") input_speech = _load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|en|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " Mr. 
Quilter is the apostle of the middle classes and we are glad" unittest.TestCase().assertEqual(transcript, EXPECTED_TRANSCRIPT) except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() def _test_large_generation_multilingual(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-large") ds = load_dataset("legacy-datasets/common_voice", "ja", split="test", streaming=True, trust_remote_code=True) ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000)) input_speech = next(iter(ds))["audio"]["array"] input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|ja|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = "木村さんに電話を貸してもらいました" unittest.TestCase().assertEqual(transcript, EXPECTED_TRANSCRIPT) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|en|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " Kimura-san called me." unittest.TestCase().assertEqual(transcript, EXPECTED_TRANSCRIPT) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|ja|>", task="translate" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " I borrowed a phone from Kimura san" unittest.TestCase().assertEqual(transcript, EXPECTED_TRANSCRIPT) except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() def _test_large_batched_generation(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-large") input_speech = _load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids_1 = model.generate(input_features[0:2], max_length=20) generated_ids_2 = model.generate(input_features[2:4], max_length=20) generated_ids = np.concatenate([generated_ids_1, generated_ids_2]) # fmt: off EXPECTED_IDS = [ [50258, 50358, 50363, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 293, 321, 366, 5404, 281], [50258, 50358, 50363, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50257, 50257], [50258, 50358, 50363, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904, 9256], [50258, 50358, 50363, 634, 575, 12525, 22618, 1968, 6144, 35617, 20084, 1756, 311, 589, 307, 534, 10281, 934, 439, 11] ] # fmt: on unittest.TestCase().assertEqual(generated_ids.tolist(), EXPECTED_IDS) # fmt: off EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes and we are glad to", " Nor is Mr. 
Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all," ] # fmt: on transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) unittest.TestCase().assertListEqual(transcript, EXPECTED_TRANSCRIPT) except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() @require_tf @require_tokenizers class TFWhisperModelIntegrationTests(unittest.TestCase): @cached_property def default_processor(self): return WhisperProcessor.from_pretrained("openai/whisper-base") def _load_datasamples(self, num_samples): return _load_datasamples(num_samples) @slow def test_tiny_logits_librispeech(self): set_seed(0) model = TFWhisperModel.from_pretrained("openai/whisper-tiny") input_speech = self._load_datasamples(1) feature_extractor = WhisperFeatureExtractor() input_features = feature_extractor(input_speech, return_tensors="tf").input_features logits = model( input_features, decoder_input_ids=tf.convert_to_tensor([[50258, 50259, 50359]]), output_hidden_states=False, output_attentions=False, return_dict=False, use_cache=False, ) # fmt: off EXPECTED_LOGITS = tf.convert_to_tensor( [ 2.9892, -6.7607, 5.7348, 3.6096, 0.2152, -5.7321, 4.8855, -1.6407, 0.2823, -1.5718, 10.4269, 3.4427, 0.0219, -8.0612, 3.4784, 8.4246, 4.0575, -2.2864, 11.1084, 0.9963, 0.9884, -8.5154, -3.5469, -9.3713, 0.9786, 3.5435, 7.4850, -5.2579, -1.4366, 10.4841 ] ) # fmt: on self.assertTrue(np.allclose(logits[0][0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) # fmt: off EXPECTED_GENERATION = tf.convert_to_tensor( [ -1.4651, -2.6944, 2.7821, 2.3793, 4.0738, 0.0188, -3.3203, 1.9836, 0.0520, 0.7095, 1.1063, 0.2952, -3.6786, -0.5249, 0.3105, 4.7691, 1.1562, 1.3046, 0.5810, -0.3624, 1.7006, 1.3424, 0.9817, 2.1958, 1.8775, -5.7046, -0.7679, 4.0113, 2.6848, 2.8609 ] ) # fmt: on head_logits = logits[0] @ tf.transpose(model.model.decoder.embed_tokens.weights[0]) self.assertTrue(np.allclose(head_logits[0, 0, :30], EXPECTED_GENERATION, atol=1e-4)) @slow def test_small_en_logits_librispeech(self): set_seed(0) model = TFWhisperModel.from_pretrained("openai/whisper-small.en") input_speech = self._load_datasamples(1) feaure_extractor = WhisperFeatureExtractor() input_features = feaure_extractor(input_speech, return_tensors="tf").input_features logits = model( input_features, decoder_input_ids=tf.convert_to_tensor([[model.config.decoder_start_token_id]]), output_hidden_states=False, output_attentions=False, use_cache=False, ) logits = logits.last_hidden_state @ tf.transpose(model.model.decoder.embed_tokens.weights[0]) # fmt: off EXPECTED_LOGITS = tf.convert_to_tensor( [ -3.6784, -7.7211, -9.5070, -11.9286, -7.6489, -9.7026, -5.6188, -8.0104, -4.6238, -5.1833, -9.0485, -3.4079, -5.4874, -2.6935, -6.3479, -7.3398, -6.9558, -7.6867, -7.4748, -8.3463, -9.9781, -10.8389, -10.3105, -11.7201, -9.7261, -7.1590, -5.9272, -12.4509, -11.1146, -8.1918 ] ) # fmt: on self.assertTrue(np.allclose(logits[0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) @slow def test_large_logits_librispeech(self): run_test_in_subprocess(test_case=self, target_func=_test_large_logits_librispeech, inputs=None) @slow def test_tiny_en_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") model.config.decoder_start_token_id = 
50257 input_speech = self._load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids = model.generate(input_features, num_beams=5, max_length=20) transcript = processor.tokenizer.batch_decode(generated_ids)[0] EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|notimestamps|> Mr. Quilter is the apostle of the middle" " classes, and we are glad to" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") input_speech = self._load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids = model.generate(input_features, num_beams=5, max_length=20) transcript = processor.tokenizer.decode(generated_ids[0]) EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. Quilter is the apostle of the middle" " classes and we are glad" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_xla_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") input_speech = self._load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features xla_generate = tf.function(model.generate, jit_compile=True) generated_ids = model.generate(input_features, num_beams=5, max_length=20) generated_ids_xla = xla_generate(input_features, num_beams=5, max_length=20) transcript = processor.tokenizer.decode(generated_ids[0]) transcript_xla = processor.tokenizer.decode(generated_ids_xla[0]) EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. 
Quilter is the apostle of the middle" " classes and we are glad" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) self.assertEqual(transcript_xla, EXPECTED_TRANSCRIPT) @slow def test_large_generation(self): run_test_in_subprocess(test_case=self, target_func=_test_large_generation, inputs=None) @slow def test_large_generation_multilingual(self): run_test_in_subprocess(test_case=self, target_func=_test_large_generation_multilingual, inputs=None) @slow def test_large_batched_generation(self): run_test_in_subprocess(test_case=self, target_func=_test_large_batched_generation, inputs=None) @slow def test_tiny_en_batched_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids = model.generate(input_features, max_length=20) # fmt: off EXPECTED_LOGITS = tf.convert_to_tensor( [ [50257, 50362, 1770, 13, 2264, 346, 353, 318, 262, 46329, 286, 262, 3504, 6097, 11, 290, 356, 389, 9675, 284], [50257, 50362, 5414, 318, 1770, 13, 2264, 346, 353, 338, 5642, 1342, 3499, 621, 465, 2300, 13, 50256, 50256, 50256], [50257, 50362, 679, 4952, 514, 326, 379, 428, 43856, 1622, 286, 262, 614, 11, 351, 6786, 290, 32595, 12023, 28236], [50257, 50362, 679, 468, 12296, 17188, 1771, 7361, 26113, 18881, 1122, 338, 670, 318, 1107, 8312, 706, 477, 290, 460] ] ) # fmt: on self.assertTrue(np.allclose(generated_ids, EXPECTED_LOGITS)) # fmt: off EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes, and we are glad to", " Nor is Mr. Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef looming", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can", ] # fmt: on transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_en_batched_xla_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features xla_generate = tf.function(model.generate, jit_compile=True) generated_ids = model.generate(input_features, max_length=20) generated_ids_xla = xla_generate(input_features, max_length=20) # fmt: off EXPECTED_LOGITS = tf.convert_to_tensor( [ [50257, 50362, 1770, 13, 2264, 346, 353, 318, 262, 46329, 286, 262, 3504, 6097, 11, 290, 356, 389, 9675, 284], [50257, 50362, 5414, 318, 1770, 13, 2264, 346, 353, 338, 5642, 1342, 3499, 621, 465, 2300, 13, 50256, 50256, 50256], [50257, 50362, 679, 4952, 514, 326, 379, 428, 43856, 1622, 286, 262, 614, 11, 351, 6786, 290, 32595, 12023, 28236], [50257, 50362, 679, 468, 12296, 17188, 1771, 7361, 26113, 18881, 1122, 338, 670, 318, 1107, 8312, 706, 477, 290, 460] ] ) # fmt: on self.assertTrue(np.allclose(generated_ids, EXPECTED_LOGITS)) self.assertTrue(np.allclose(generated_ids_xla, EXPECTED_LOGITS)) # fmt: off EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes, and we are glad to", " Nor is Mr. 
Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef looming", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can", ] # fmt: on transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) transcript_xla = processor.batch_decode(generated_ids_xla, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) self.assertListEqual(transcript_xla, EXPECTED_TRANSCRIPT)
transformers/tests/models/whisper/test_modeling_tf_whisper.py/0
{ "file_path": "transformers/tests/models/whisper/test_modeling_tf_whisper.py", "repo_id": "transformers", "token_count": 21508 }
473
# coding=utf-8
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("FacebookAI/xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("FacebookAI/xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
transformers/tests/models/xlm_roberta/test_modeling_flax_xlm_roberta.py/0
{ "file_path": "transformers/tests/models/xlm_roberta/test_modeling_flax_xlm_roberta.py", "repo_id": "transformers", "token_count": 690 }
474
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch YOSO model.""" import unittest from transformers import YosoConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( YosoForMaskedLM, YosoForMultipleChoice, YosoForQuestionAnswering, YosoForSequenceClassification, YosoForTokenClassification, YosoModel, ) class YosoModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return YosoConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, 
intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = YosoModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = YosoModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = YosoForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = YosoForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = YosoForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, 
labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = YosoForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = YosoForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class YosoModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( YosoModel, YosoForMaskedLM, YosoForMultipleChoice, YosoForQuestionAnswering, YosoForSequenceClassification, YosoForTokenClassification, ) if is_torch_available() else () ) test_pruning = False test_headmasking = False test_torchscript = False all_generative_model_classes = () pipeline_model_mapping = ( { "feature-extraction": YosoModel, "fill-mask": YosoForMaskedLM, "question-answering": YosoForQuestionAnswering, "text-classification": YosoForSequenceClassification, "token-classification": YosoForTokenClassification, "zero-shot": YosoForSequenceClassification, } if is_torch_available() else {} ) def setUp(self): self.model_tester = YosoModelTester(self) self.config_tester = ConfigTester(self, config_class=YosoConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "uw-madison/yoso-4096" model = YosoModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="This model does not output attentions") def test_attention_outputs(self): return @require_torch class YosoModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head(self): model = YosoModel.from_pretrained("uw-madison/yoso-4096") input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size((1, 6, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-0.0611, 0.1242, 0.0840], [0.0280, -0.0048, 0.1125], [0.0106, 0.0226, 0.0751]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) @slow def test_inference_masked_lm(self): model = YosoForMaskedLM.from_pretrained("uw-madison/yoso-4096") input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) with torch.no_grad(): output = model(input_ids)[0] vocab_size = 50265 expected_shape = torch.Size((1, 6, vocab_size)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-2.1313, -3.7285, -2.2407], [-2.7047, -3.3314, -2.6408], [0.0629, -2.5166, -0.3356]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) @slow def test_inference_masked_lm_long_input(self): model = YosoForMaskedLM.from_pretrained("uw-madison/yoso-4096") input_ids = torch.arange(4096).unsqueeze(0) with torch.no_grad(): output = model(input_ids)[0] vocab_size = 50265 expected_shape = torch.Size((1, 4096, vocab_size)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-2.3914, -4.3742, -5.0956], [-4.0988, -4.2384, -7.0406], [-3.1427, -3.7192, -6.6800]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
transformers/tests/models/yoso/test_modeling_yoso.py/0
{ "file_path": "transformers/tests/models/yoso/test_modeling_yoso.py", "repo_id": "transformers", "token_count": 7167 }
475
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, PreTrainedTokenizerBase, is_torch_available, is_vision_available, ) from transformers.pipelines import ImageClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torch_or_tf, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_torch_or_tf @require_vision class ImageClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING def get_test_pipeline(self, model, tokenizer, processor, torch_dtype="float32"): image_classifier = ImageClassificationPipeline( model=model, image_processor=processor, top_k=2, torch_dtype=torch_dtype ) examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", ] return image_classifier, examples def run_pipeline_test(self, image_classifier, examples): outputs = image_classifier("./tests/fixtures/tests_samples/COCO/000000039769.png") self.assertEqual( outputs, [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ) import datasets # we use revision="refs/pr/1" until the PR is merged # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1 dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1") # Accepts URL + PIL.Image + lists outputs = image_classifier( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["image"], # LA dataset[1]["image"], # L dataset[2]["image"], ] ) self.assertEqual( outputs, [ [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ], ) @require_torch def test_small_model_pt(self): small_model = "hf-internal-testing/tiny-random-vit" image_classifier = pipeline("image-classification", model=small_model) outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ) outputs = image_classifier( [ 
"http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], top_k=2, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ], ) @require_tf def test_small_model_tf(self): small_model = "hf-internal-testing/tiny-random-vit" image_classifier = pipeline("image-classification", model=small_model, framework="tf") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ) outputs = image_classifier( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], top_k=2, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ], ) def test_custom_tokenizer(self): tokenizer = PreTrainedTokenizerBase() # Assert that the pipeline can be initialized with a feature extractor that is not in any mapping image_classifier = pipeline( "image-classification", model="hf-internal-testing/tiny-random-vit", tokenizer=tokenizer ) self.assertIs(image_classifier.tokenizer, tokenizer) @require_torch def test_torch_float16_pipeline(self): image_classifier = pipeline( "image-classification", model="hf-internal-testing/tiny-random-vit", torch_dtype=torch.float16 ) outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=3), [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ) @require_torch def test_torch_bfloat16_pipeline(self): image_classifier = pipeline( "image-classification", model="hf-internal-testing/tiny-random-vit", torch_dtype=torch.bfloat16 ) outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=3), [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ) @slow @require_torch def test_perceiver(self): # Perceiver is not tested by `run_pipeline_test` properly. 
# That is because the type of feature_extractor and model preprocessor need to be kept # in sync, which is not the case in the current design image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-conv") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.4385, "label": "tabby, tabby cat"}, {"score": 0.321, "label": "tiger cat"}, {"score": 0.0502, "label": "Egyptian cat"}, {"score": 0.0137, "label": "crib, cot"}, {"score": 0.007, "label": "radiator"}, ], ) image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-fourier") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.5658, "label": "tabby, tabby cat"}, {"score": 0.1309, "label": "tiger cat"}, {"score": 0.0722, "label": "Egyptian cat"}, {"score": 0.0707, "label": "remote control, remote"}, {"score": 0.0082, "label": "computer keyboard, keypad"}, ], ) image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-learned") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.3022, "label": "tabby, tabby cat"}, {"score": 0.2362, "label": "Egyptian cat"}, {"score": 0.1856, "label": "tiger cat"}, {"score": 0.0324, "label": "remote control, remote"}, {"score": 0.0096, "label": "quilt, comforter, comfort, puff"}, ], ) @slow @require_torch def test_multilabel_classification(self): small_model = "hf-internal-testing/tiny-random-vit" # Sigmoid is applied for multi-label classification image_classifier = pipeline("image-classification", model=small_model) image_classifier.model.config.problem_type = "multi_label_classification" outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}], ) outputs = image_classifier( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}], [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}], ], ) @slow @require_torch def test_function_to_apply(self): small_model = "hf-internal-testing/tiny-random-vit" # Sigmoid is applied for multi-label classification image_classifier = pipeline("image-classification", model=small_model) outputs = image_classifier( "http://images.cocodataset.org/val2017/000000039769.jpg", function_to_apply="sigmoid", ) self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}], )
transformers/tests/pipelines/test_pipelines_image_classification.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_image_classification.py", "repo_id": "transformers", "token_count": 5466 }
476
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_av, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_av class VideoClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def get_test_pipeline(self, model, tokenizer, processor, torch_dtype="float32"): example_video_filepath = hf_hub_download( repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" ) video_classifier = VideoClassificationPipeline( model=model, image_processor=processor, top_k=2, torch_dtype=torch_dtype ) examples = [ example_video_filepath, "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4", ] return video_classifier, examples def run_pipeline_test(self, video_classifier, examples): for example in examples: outputs = video_classifier(example) self.assertEqual( outputs, [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ) @require_torch def test_small_model_pt(self): small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification" small_feature_extractor = VideoMAEFeatureExtractor( size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} ) video_classifier = pipeline( "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4 ) video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset") outputs = video_classifier(video_file_path, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], ) outputs = video_classifier( [ video_file_path, video_file_path, ], top_k=2, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], ], ) @require_tf @unittest.skip def test_small_model_tf(self): pass
transformers/tests/pipelines/test_pipelines_video_classification.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_video_classification.py", "repo_id": "transformers", "token_count": 1564 }
477
# Testing new Hugging Face Deep Learning Container.

This document explains the testing strategy for releasing the new Hugging Face Deep Learning Container. AWS maintains 14 days of currency with framework releases. Besides framework releases, the AWS release train is bi-weekly, on Mondays. The code cutoff date for any changes is the Wednesday before the release Monday.

## Test Case 1: Releasing a New Version (Minor/Major) of 🤗 Transformers

### Requirements:

Tests should run on the Release Candidate for the new `transformers` release to validate that the new release is compatible with the DLCs. To run these tests you need credentials for the HF SageMaker AWS Account. You can ask @philschmid or @n1t0 to get access.

### Run Tests:

Before we can run the tests we need to adjust the `requirements.txt` for PyTorch under `/tests/sagemaker/scripts/pytorch` and for TensorFlow under `/tests/sagemaker/scripts/tensorflow`. We adjust the branch to the new RC tag.

```
git+https://github.com/huggingface/transformers.git@v4.5.0.rc0 # install main or adjust it with vX.X.X for installing a specific transformers version
```

After we adjusted the `requirements.txt` we can run the Amazon SageMaker tests with:

```bash
AWS_PROFILE=<enter-your-profile> make test-sagemaker
```

These tests take around 10-15 minutes to finish. Preferably make a screenshot of the successfully run tests.

### After Transformers Release:

After we have released the Release Candidate we need to create a PR at the [Deep Learning Container Repository](https://github.com/aws/deep-learning-containers).

**Creating the update PR:**

1. Update the two latest `buildspec.yaml` configs for [PyTorch](https://github.com/aws/deep-learning-containers/tree/master/huggingface/pytorch) and [TensorFlow](https://github.com/aws/deep-learning-containers/tree/master/huggingface/tensorflow). The two latest `buildspec.yaml` are the `buildspec.yaml` without a version tag and the one with the highest framework version, e.g. `buildspec-1-7-1.yml` and not `buildspec-1-6.yml`. To update the `buildspec.yaml` we need to adjust either the `transformers_version` or the `datasets_version` or both. Example for upgrading to `transformers 4.5.0` and `datasets 1.6.0`:
```yaml
account_id: &ACCOUNT_ID <set-$ACCOUNT_ID-in-environment>
region: &REGION <set-$REGION-in-environment>
base_framework: &BASE_FRAMEWORK pytorch
framework: &FRAMEWORK !join [ "huggingface_", *BASE_FRAMEWORK]
version: &VERSION 1.6.0
short_version: &SHORT_VERSION 1.6

repository_info:
    training_repository: &TRAINING_REPOSITORY
        image_type: &TRAINING_IMAGE_TYPE training
        root: !join [ "huggingface/", *BASE_FRAMEWORK, "/", *TRAINING_IMAGE_TYPE ]
        repository_name: &REPOSITORY_NAME !join ["pr", "-", "huggingface", "-", *BASE_FRAMEWORK, "-", *TRAINING_IMAGE_TYPE]
        repository: &REPOSITORY !join [ *ACCOUNT_ID, .dkr.ecr., *REGION, .amazonaws.com/, *REPOSITORY_NAME ]

images:
    BuildHuggingFacePytorchGpuPy37Cu110TrainingDockerImage:
        <<: *TRAINING_REPOSITORY
        build: &HUGGINGFACE_PYTORCH_GPU_TRAINING_PY3 false
        image_size_baseline: &IMAGE_SIZE_BASELINE 15000
        device_type: &DEVICE_TYPE gpu
        python_version: &DOCKER_PYTHON_VERSION py3
        tag_python_version: &TAG_PYTHON_VERSION py36
        cuda_version: &CUDA_VERSION cu110
        os_version: &OS_VERSION ubuntu18.04
        transformers_version: &TRANSFORMERS_VERSION 4.5.0 # this was adjusted from 4.4.2 to 4.5.0
        datasets_version: &DATASETS_VERSION 1.6.0 # this was adjusted from 1.5.0 to 1.6.0
        tag: !join [ *VERSION, '-', 'transformers', *TRANSFORMERS_VERSION, '-', *DEVICE_TYPE, '-', *TAG_PYTHON_VERSION, '-', *CUDA_VERSION, '-', *OS_VERSION ]
        docker_file: !join [ docker/, *SHORT_VERSION, /, *DOCKER_PYTHON_VERSION, /, *CUDA_VERSION, /Dockerfile., *DEVICE_TYPE ]
```

2. In the PR comment describe what tests we ran and with which package versions. Here you can copy the table from [Current Tests](#current-tests). You can take a look at this [PR](https://github.com/aws/deep-learning-containers/pull/1016) to see which information is needed.

## Test Case 2: Releasing a New AWS Framework DLC

## Execute Tests

### Requirements:

AWS is going to release new DLCs for PyTorch and/or TensorFlow. The tests should run on the new framework versions with the current `transformers` release to validate that the new framework release is compatible with the `transformers` version. To run these tests you need credentials for the HF SageMaker AWS Account. You can ask @philschmid or @n1t0 to get access. AWS will notify us with a new issue in the repository pointing to their framework upgrade PR.

### Run Tests:

Before we can run the tests we need to adjust the `requirements.txt` for PyTorch under `/tests/sagemaker/scripts/pytorch` and for TensorFlow under `/tests/sagemaker/scripts/tensorflow`. We add the new framework version to it.

```
torch==1.8.1 # for pytorch
tensorflow-gpu==2.5.0 # for tensorflow
```

After we adjusted the `requirements.txt` we can run the Amazon SageMaker tests with:

```bash
AWS_PROFILE=<enter-your-profile> make test-sagemaker
```

These tests take around 10-15 minutes to finish. Preferably make a screenshot of the successfully run tests.

### After successful Tests:

After we have successfully run tests for the new framework version we need to create a PR at the [Deep Learning Container Repository](https://github.com/aws/deep-learning-containers).

**Creating the update PR:**
1. Create a new `buildspec.yaml` config for [PyTorch](https://github.com/aws/deep-learning-containers/tree/master/huggingface/pytorch) and [TensorFlow](https://github.com/aws/deep-learning-containers/tree/master/huggingface/tensorflow) and rename the old `buildspec.yaml` to `buildspec-x.x.x.yml`, where `x.x.x` is the base framework version, e.g. if PyTorch 1.6.0 is the latest version in `buildspec.yaml` the file should be renamed to `buildspec-1-6.yml`. To create the new `buildspec.yaml` we need to adjust the `version` and the `short_version`. Example for upgrading to `pytorch 1.7.1`:

```yaml
account_id: &ACCOUNT_ID <set-$ACCOUNT_ID-in-environment>
region: &REGION <set-$REGION-in-environment>
base_framework: &BASE_FRAMEWORK pytorch
framework: &FRAMEWORK !join [ "huggingface_", *BASE_FRAMEWORK]
version: &VERSION 1.7.1 # this was adjusted from 1.6.0 to 1.7.1
short_version: &SHORT_VERSION 1.7 # this was adjusted from 1.6 to 1.7

repository_info:
    training_repository: &TRAINING_REPOSITORY
        image_type: &TRAINING_IMAGE_TYPE training
        root: !join [ "huggingface/", *BASE_FRAMEWORK, "/", *TRAINING_IMAGE_TYPE ]
        repository_name: &REPOSITORY_NAME !join ["pr", "-", "huggingface", "-", *BASE_FRAMEWORK, "-", *TRAINING_IMAGE_TYPE]
        repository: &REPOSITORY !join [ *ACCOUNT_ID, .dkr.ecr., *REGION, .amazonaws.com/, *REPOSITORY_NAME ]

images:
    BuildHuggingFacePytorchGpuPy37Cu110TrainingDockerImage:
        <<: *TRAINING_REPOSITORY
        build: &HUGGINGFACE_PYTORCH_GPU_TRAINING_PY3 false
        image_size_baseline: &IMAGE_SIZE_BASELINE 15000
        device_type: &DEVICE_TYPE gpu
        python_version: &DOCKER_PYTHON_VERSION py3
        tag_python_version: &TAG_PYTHON_VERSION py36
        cuda_version: &CUDA_VERSION cu110
        os_version: &OS_VERSION ubuntu18.04
        transformers_version: &TRANSFORMERS_VERSION 4.4.2
        datasets_version: &DATASETS_VERSION 1.5.0
        tag: !join [ *VERSION, '-', 'transformers', *TRANSFORMERS_VERSION, '-', *DEVICE_TYPE, '-', *TAG_PYTHON_VERSION, '-', *CUDA_VERSION, '-', *OS_VERSION ]
        docker_file: !join [ docker/, *SHORT_VERSION, /, *DOCKER_PYTHON_VERSION, /, *CUDA_VERSION, /Dockerfile., *DEVICE_TYPE ]
```

2. In the PR comment describe what tests we ran and with which framework versions. Here you can copy the table from [Current Tests](#current-tests). You can take a look at this [PR](https://github.com/aws/deep-learning-containers/pull/1025) to see which information is needed.
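As an optional sanity check before opening the PR (this is not part of the official procedure, and it assumes you run it inside the same Python environment that `make test-sagemaker` uses), you can print the framework and `transformers` versions the adjusted `requirements.txt` actually resolved to, so they match the versions you put into the `buildspec.yaml`:

```python
# Optional sanity check: confirm the resolved package versions before opening the PR.
import transformers

print("transformers:", transformers.__version__)

try:
    import torch  # only present in the PyTorch test environment
    print("torch:", torch.__version__)
except ImportError:
    pass

try:
    import tensorflow as tf  # only present in the TensorFlow test environment
    print("tensorflow:", tf.__version__)
except ImportError:
    pass
```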
## Current Tests

| ID | Description | Platform | #GPUS | Collected & evaluated metrics |
|-------------------------------------|----------------------------------------------------------------------|-----------------------------|-------|-------------------------------------------|
| pytorch-transfromers-test-single | test bert finetuning using BERT from transformers lib + PT | SageMaker createTrainingJob | 1 | train_runtime, eval_accuracy & eval_loss |
| pytorch-transfromers-test-2-ddp | test bert finetuning using BERT from transformers lib + PT DDP | SageMaker createTrainingJob | 16 | train_runtime, eval_accuracy & eval_loss |
| pytorch-transfromers-test-2-smd | test bert finetuning using BERT from transformers lib + PT SM DDP | SageMaker createTrainingJob | 16 | train_runtime, eval_accuracy & eval_loss |
| pytorch-transfromers-test-1-smp | test roberta finetuning using BERT from transformers lib + PT SM MP | SageMaker createTrainingJob | 8 | train_runtime, eval_accuracy & eval_loss |
| tensorflow-transfromers-test-single | test bert finetuning using BERT from transformers lib + TF | SageMaker createTrainingJob | 1 | train_runtime, eval_accuracy & eval_loss |
| tensorflow-transfromers-test-2-smd | test bert finetuning using BERT from transformers lib + TF SM DDP | SageMaker createTrainingJob | 16 | train_runtime, eval_accuracy & eval_loss |
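For orientation, the jobs in the table above are plain SageMaker training jobs. A minimal sketch of how such a single-GPU job can be launched with the `HuggingFace` estimator from the `sagemaker` SDK is shown below; the entry point, IAM role, instance type, and hyperparameters are illustrative assumptions, not the exact values used by `make test-sagemaker`:

```python
# Illustrative sketch only: launch one single-GPU training-job test on SageMaker.
# The script name, role ARN, instance type and hyperparameters are assumptions.
from sagemaker.huggingface import HuggingFace

estimator = HuggingFace(
    entry_point="run_glue.py",                       # hypothetical training script
    source_dir="./tests/sagemaker/scripts/pytorch",  # scripts folder referenced above
    instance_type="ml.p3.2xlarge",                   # 1 GPU, matching the "-single" test case
    instance_count=1,
    role="arn:aws:iam::<account-id>:role/<sagemaker-execution-role>",
    transformers_version="4.4.2",
    pytorch_version="1.6.0",
    py_version="py36",
    hyperparameters={"epochs": 1, "model_name_or_path": "distilbert-base-uncased"},
)

# Creates the SageMaker training job; train_runtime, eval_accuracy and eval_loss
# are afterwards read from the job's output to fill the table above.
estimator.fit()
```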
transformers/tests/sagemaker/README.md/0
{ "file_path": "transformers/tests/sagemaker/README.md", "repo_id": "transformers", "token_count": 3293 }
478
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from parameterized import parameterized from transformers.testing_utils import require_flax, require_tf, require_torch, require_vision from transformers.utils.import_utils import is_flax_available, is_tf_available, is_torch_available, is_vision_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf if is_flax_available(): import jax if is_vision_available(): import PIL.Image from transformers.image_transforms import ( center_crop, center_to_corners_format, convert_to_rgb, corners_to_center_format, flip_channel_order, get_resize_output_image_size, id_to_rgb, normalize, pad, resize, rgb_to_id, to_channel_dimension_format, to_pil_image, ) def get_random_image(height, width, num_channels=3, channels_first=True): shape = (num_channels, height, width) if channels_first else (height, width, num_channels) random_array = np.random.randint(0, 256, shape, dtype=np.uint8) return random_array @require_vision class ImageTransformsTester(unittest.TestCase): @parameterized.expand( [ ("numpy_float_channels_first", (3, 4, 5), np.float32), ("numpy_float_channels_last", (4, 5, 3), np.float32), ("numpy_float_channels_first", (3, 4, 5), np.float64), ("numpy_float_channels_last", (4, 5, 3), np.float64), ("numpy_int_channels_first", (3, 4, 5), np.int32), ("numpy_uint_channels_first", (3, 4, 5), np.uint8), ] ) @require_vision def test_to_pil_image(self, name, image_shape, dtype): image = np.random.randint(0, 256, image_shape).astype(dtype) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # make sure image is correctly rescaled self.assertTrue(np.abs(np.asarray(pil_image)).sum() > 0) @parameterized.expand( [ ("numpy_float_channels_first", (3, 4, 5), np.float32), ("numpy_float_channels_first", (3, 4, 5), np.float64), ("numpy_float_channels_last", (4, 5, 3), np.float32), ("numpy_float_channels_last", (4, 5, 3), np.float64), ] ) @require_vision def test_to_pil_image_from_float(self, name, image_shape, dtype): image = np.random.rand(*image_shape).astype(dtype) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # make sure image is correctly rescaled self.assertTrue(np.abs(np.asarray(pil_image)).sum() > 0) # Make sure that an exception is raised if image is not in [0, 1] image = np.random.randn(*image_shape).astype(dtype) with self.assertRaises(ValueError): to_pil_image(image) @require_vision def test_to_pil_image_from_mask(self): # Make sure binary mask remains a binary mask image = np.random.randint(0, 2, (3, 4, 5)).astype(np.uint8) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) np_img = np.asarray(pil_image) self.assertTrue(np_img.min() == 0) self.assertTrue(np_img.max() == 1) image = np.random.randint(0, 2, (3, 4, 5)).astype(np.float32) pil_image 
= to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) np_img = np.asarray(pil_image) self.assertTrue(np_img.min() == 0) self.assertTrue(np_img.max() == 1) @require_tf def test_to_pil_image_from_tensorflow(self): # channels_first image = tf.random.uniform((3, 4, 5)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # channels_last image = tf.random.uniform((4, 5, 3)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) @require_torch def test_to_pil_image_from_torch(self): # channels first image = torch.rand((3, 4, 5)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # channels last image = torch.rand((4, 5, 3)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) @require_flax def test_to_pil_image_from_jax(self): key = jax.random.PRNGKey(0) # channel first image = jax.random.uniform(key, (3, 4, 5)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # channel last image = jax.random.uniform(key, (4, 5, 3)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) def test_to_channel_dimension_format(self): # Test that function doesn't reorder if channel dim matches the input. image = np.random.rand(3, 4, 5) image = to_channel_dimension_format(image, "channels_first") self.assertEqual(image.shape, (3, 4, 5)) image = np.random.rand(4, 5, 3) image = to_channel_dimension_format(image, "channels_last") self.assertEqual(image.shape, (4, 5, 3)) # Test that function reorders if channel dim doesn't match the input. image = np.random.rand(3, 4, 5) image = to_channel_dimension_format(image, "channels_last") self.assertEqual(image.shape, (4, 5, 3)) image = np.random.rand(4, 5, 3) image = to_channel_dimension_format(image, "channels_first") self.assertEqual(image.shape, (3, 4, 5)) # Can pass in input_data_format and works if data format is ambiguous or unknown. image = np.random.rand(4, 5, 6) image = to_channel_dimension_format(image, "channels_first", input_channel_dim="channels_last") self.assertEqual(image.shape, (6, 4, 5)) def test_get_resize_output_image_size(self): image = np.random.randint(0, 256, (3, 224, 224)) # Test the output size defaults to (x, x) if an int is given. self.assertEqual(get_resize_output_image_size(image, 10), (10, 10)) self.assertEqual(get_resize_output_image_size(image, [10]), (10, 10)) self.assertEqual(get_resize_output_image_size(image, (10,)), (10, 10)) # Test the output size is the same as the input if a two element tuple/list is given. 
self.assertEqual(get_resize_output_image_size(image, (10, 20)), (10, 20)) self.assertEqual(get_resize_output_image_size(image, [10, 20]), (10, 20)) self.assertEqual(get_resize_output_image_size(image, (10, 20), default_to_square=True), (10, 20)) # To match pytorch behaviour, max_size is only relevant if size is an int self.assertEqual(get_resize_output_image_size(image, (10, 20), max_size=5), (10, 20)) # Test output size = (int(size * height / width), size) if size is an int and height > width image = np.random.randint(0, 256, (3, 50, 40)) self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False), (25, 20)) # Test output size = (size, int(size * width / height)) if size is an int and width <= height image = np.random.randint(0, 256, (3, 40, 50)) self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False), (20, 25)) # Test size is resized if longer size > max_size image = np.random.randint(0, 256, (3, 50, 40)) self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False, max_size=22), (22, 17)) # Test output size = (int(size * height / width), size) if size is an int and height > width and # input has 4 channels image = np.random.randint(0, 256, (4, 50, 40)) self.assertEqual( get_resize_output_image_size(image, 20, default_to_square=False, input_data_format="channels_first"), (25, 20), ) # Test correct channel dimension is returned if output size if height == 3 # Defaults to input format - channels first image = np.random.randint(0, 256, (3, 18, 97)) resized_image = resize(image, (3, 20)) self.assertEqual(resized_image.shape, (3, 3, 20)) # Defaults to input format - channels last image = np.random.randint(0, 256, (18, 97, 3)) resized_image = resize(image, (3, 20)) self.assertEqual(resized_image.shape, (3, 20, 3)) image = np.random.randint(0, 256, (3, 18, 97)) resized_image = resize(image, (3, 20), data_format="channels_last") self.assertEqual(resized_image.shape, (3, 20, 3)) image = np.random.randint(0, 256, (18, 97, 3)) resized_image = resize(image, (3, 20), data_format="channels_first") self.assertEqual(resized_image.shape, (3, 3, 20)) def test_resize(self): image = np.random.randint(0, 256, (3, 224, 224)) # Check the channel order is the same by default resized_image = resize(image, (30, 40)) self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (3, 30, 40)) # Check channel order is changed if specified resized_image = resize(image, (30, 40), data_format="channels_last") self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (30, 40, 3)) # Check PIL.Image.Image is returned if return_numpy=False resized_image = resize(image, (30, 40), return_numpy=False) self.assertIsInstance(resized_image, PIL.Image.Image) # PIL size is in (width, height) order self.assertEqual(resized_image.size, (40, 30)) # Check an image with float values between 0-1 is returned with values in this range image = np.random.rand(3, 224, 224) resized_image = resize(image, (30, 40)) self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (3, 30, 40)) self.assertTrue(np.all(resized_image >= 0)) self.assertTrue(np.all(resized_image <= 1)) # Check that an image with 4 channels is resized correctly image = np.random.randint(0, 256, (4, 224, 224)) resized_image = resize(image, (30, 40), input_data_format="channels_first") self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (4, 30, 40)) def test_normalize(self): image = 
np.random.randint(0, 256, (224, 224, 3)) / 255 # Test that exception is raised if inputs are incorrect # Not a numpy array image with self.assertRaises(ValueError): normalize(5, 5, 5) # Number of mean values != number of channels with self.assertRaises(ValueError): normalize(image, mean=(0.5, 0.6), std=1) # Number of std values != number of channels with self.assertRaises(ValueError): normalize(image, mean=1, std=(0.5, 0.6)) # Test result is correct - output data format is channels_first and normalization # correctly computed mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) expected_image = ((image - mean) / std).transpose((2, 0, 1)) normalized_image = normalize(image, mean=mean, std=std, data_format="channels_first") self.assertIsInstance(normalized_image, np.ndarray) self.assertEqual(normalized_image.shape, (3, 224, 224)) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) # Test image with 4 channels is normalized correctly image = np.random.randint(0, 256, (224, 224, 4)) / 255 mean = (0.5, 0.6, 0.7, 0.8) std = (0.1, 0.2, 0.3, 0.4) expected_image = (image - mean) / std self.assertTrue( np.allclose( normalize(image, mean=mean, std=std, input_data_format="channels_last"), expected_image, atol=1e-6 ) ) # Test float32 image input keeps float32 dtype image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32) / 255 mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) expected_image = ((image - mean) / std).astype(np.float32) normalized_image = normalize(image, mean=mean, std=std) self.assertEqual(normalized_image.dtype, np.float32) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) # Test float16 image input keeps float16 dtype image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float16) / 255 mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) # The mean and std are cast to match the dtype of the input image cast_mean = np.array(mean, dtype=np.float16) cast_std = np.array(std, dtype=np.float16) expected_image = (image - cast_mean) / cast_std normalized_image = normalize(image, mean=mean, std=std) self.assertEqual(normalized_image.dtype, np.float16) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) # Test int image input is converted to float32 image = np.random.randint(0, 2, (224, 224, 3), dtype=np.uint8) mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) expected_image = (image.astype(np.float32) - mean) / std normalized_image = normalize(image, mean=mean, std=std) self.assertEqual(normalized_image.dtype, np.float32) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) def test_center_crop(self): image = np.random.randint(0, 256, (3, 224, 224)) # Test that exception is raised if inputs are incorrect with self.assertRaises(ValueError): center_crop(image, 10) # Test result is correct - output data format is channels_first and center crop # correctly computed expected_image = image[:, 52:172, 82:142].transpose(1, 2, 0) cropped_image = center_crop(image, (120, 60), data_format="channels_last") self.assertIsInstance(cropped_image, np.ndarray) self.assertEqual(cropped_image.shape, (120, 60, 3)) self.assertTrue(np.allclose(cropped_image, expected_image)) # Test that image is padded with zeros if crop size is larger than image size expected_image = np.zeros((300, 260, 3)) expected_image[38:262, 18:242, :] = image.transpose((1, 2, 0)) cropped_image = center_crop(image, (300, 260), data_format="channels_last") self.assertIsInstance(cropped_image, np.ndarray) self.assertEqual(cropped_image.shape, (300, 260, 3)) 
self.assertTrue(np.allclose(cropped_image, expected_image)) # Test that odd numbered padding requirement still leads to correct output dimensions cropped_image = center_crop(image, (300, 259), data_format="channels_last") self.assertEqual(cropped_image.shape, (300, 259, 3)) # Test image with 4 channels is cropped correctly image = np.random.randint(0, 256, (224, 224, 4)) expected_image = image[52:172, 82:142, :] self.assertTrue(np.allclose(center_crop(image, (120, 60), input_data_format="channels_last"), expected_image)) def test_center_to_corners_format(self): bbox_center = np.array([[10, 20, 4, 8], [15, 16, 3, 4]]) expected = np.array([[8, 16, 12, 24], [13.5, 14, 16.5, 18]]) self.assertTrue(np.allclose(center_to_corners_format(bbox_center), expected)) # Check that the function and inverse function are inverse of each other self.assertTrue(np.allclose(corners_to_center_format(center_to_corners_format(bbox_center)), bbox_center)) def test_corners_to_center_format(self): bbox_corners = np.array([[8, 16, 12, 24], [13.5, 14, 16.5, 18]]) expected = np.array([[10, 20, 4, 8], [15, 16, 3, 4]]) self.assertTrue(np.allclose(corners_to_center_format(bbox_corners), expected)) # Check that the function and inverse function are inverse of each other self.assertTrue(np.allclose(center_to_corners_format(corners_to_center_format(bbox_corners)), bbox_corners)) def test_rgb_to_id(self): # test list input rgb = [125, 4, 255] self.assertEqual(rgb_to_id(rgb), 16712829) # test numpy array input color = np.array( [ [ [213, 54, 165], [88, 207, 39], [156, 108, 128], ], [ [183, 194, 46], [137, 58, 88], [114, 131, 233], ], ] ) expected = np.array([[10827477, 2608984, 8416412], [3064503, 5782153, 15303538]]) self.assertTrue(np.allclose(rgb_to_id(color), expected)) def test_id_to_rgb(self): # test int input self.assertEqual(id_to_rgb(16712829), [125, 4, 255]) # test array input id_array = np.array([[10827477, 2608984, 8416412], [3064503, 5782153, 15303538]]) color = np.array( [ [ [213, 54, 165], [88, 207, 39], [156, 108, 128], ], [ [183, 194, 46], [137, 58, 88], [114, 131, 233], ], ] ) self.assertTrue(np.allclose(id_to_rgb(id_array), color)) def test_pad(self): # fmt: off image = np.array([[ [0, 1], [2, 3], ]]) # fmt: on # Test that exception is raised if unknown padding mode is specified with self.assertRaises(ValueError): pad(image, 10, mode="unknown") # Test that exception is raised if invalid padding is specified with self.assertRaises(ValueError): # Cannot pad on channel dimension pad(image, (5, 10, 10)) # Test image is padded equally on all sides is padding is an int # fmt: off expected_image = np.array([ [[0, 0, 0, 0], [0, 0, 1, 0], [0, 2, 3, 0], [0, 0, 0, 0]], ]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, 1))) # Test the left and right of each axis is padded (pad_left, pad_right) # fmt: off expected_image = np.array( [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 2, 3, 0], [0, 0, 0, 0, 0]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, (2, 1)))) # Test only one axis is padded (pad_left, pad_right) # fmt: off expected_image = np.array([[ [9, 9], [9, 9], [0, 1], [2, 3], [9, 9] ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((2, 1), (0, 0)), constant_values=9))) # Test padding with a constant value # fmt: off expected_image = np.array([[ [8, 8, 0, 1, 9], [8, 8, 2, 3, 9], [8, 8, 7, 7, 9], [8, 8, 7, 7, 9] ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), constant_values=((6, 7), (8, 9))))) # fmt: off image 
= np.array([[ [0, 1, 2], [3, 4, 5], [6, 7, 8], ]]) # fmt: on # Test padding with PaddingMode.REFLECT # fmt: off expected_image = np.array([[ [2, 1, 0, 1, 2, 1], [5, 4, 3, 4, 5, 4], [8, 7, 6, 7, 8, 7], [5, 4, 3, 4, 5, 4], [2, 1, 0, 1, 2, 1], ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="reflect"))) # Test padding with PaddingMode.REPLICATE # fmt: off expected_image = np.array([[ [0, 0, 0, 1, 2, 2], [3, 3, 3, 4, 5, 5], [6, 6, 6, 7, 8, 8], [6, 6, 6, 7, 8, 8], [6, 6, 6, 7, 8, 8], ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="replicate"))) # Test padding with PaddingMode.SYMMETRIC # fmt: off expected_image = np.array([[ [1, 0, 0, 1, 2, 2], [4, 3, 3, 4, 5, 5], [7, 6, 6, 7, 8, 8], [7, 6, 6, 7, 8, 8], [4, 3, 3, 4, 5, 5], ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="symmetric"))) # Test we can specify the output data format # Test padding with PaddingMode.REFLECT # fmt: off image = np.array([[ [0, 1], [2, 3], ]]) expected_image = np.array([ [[0], [1], [0], [1], [0]], [[2], [3], [2], [3], [2]], [[0], [1], [0], [1], [0]], [[2], [3], [2], [3], [2]] ]) # fmt: on self.assertTrue( np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="reflect", data_format="channels_last")) ) # Test we can pad on an image with 2 channels # fmt: off image = np.array([ [[0, 1], [2, 3]], ]) expected_image = np.array([ [[0, 0], [0, 1], [2, 3]], [[0, 0], [0, 0], [0, 0]], ]) # fmt: on self.assertTrue( np.allclose( expected_image, pad(image, ((0, 1), (1, 0)), mode="constant", input_data_format="channels_last") ) ) @require_vision def test_convert_to_rgb(self): # Test that an RGBA image is converted to RGB image = np.array([[[1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.uint8) pil_image = PIL.Image.fromarray(image) self.assertEqual(pil_image.mode, "RGBA") self.assertEqual(pil_image.size, (2, 1)) # For the moment, numpy images are returned as is rgb_image = convert_to_rgb(image) self.assertEqual(rgb_image.shape, (1, 2, 4)) self.assertTrue(np.allclose(rgb_image, image)) # And PIL images are converted rgb_image = convert_to_rgb(pil_image) self.assertEqual(rgb_image.mode, "RGB") self.assertEqual(rgb_image.size, (2, 1)) self.assertTrue(np.allclose(np.array(rgb_image), np.array([[[1, 2, 3], [5, 6, 7]]], dtype=np.uint8))) # Test that a grayscale image is converted to RGB image = np.array([[0, 255]], dtype=np.uint8) pil_image = PIL.Image.fromarray(image) self.assertEqual(pil_image.mode, "L") self.assertEqual(pil_image.size, (2, 1)) rgb_image = convert_to_rgb(pil_image) self.assertEqual(rgb_image.mode, "RGB") self.assertEqual(rgb_image.size, (2, 1)) self.assertTrue(np.allclose(np.array(rgb_image), np.array([[[0, 0, 0], [255, 255, 255]]], dtype=np.uint8))) def test_flip_channel_order(self): # fmt: off img_channels_first = np.array([ [[ 0, 1, 2, 3], [ 4, 5, 6, 7]], [[ 8, 9, 10, 11], [12, 13, 14, 15]], [[16, 17, 18, 19], [20, 21, 22, 23]], ]) # fmt: on img_channels_last = np.moveaxis(img_channels_first, 0, -1) # fmt: off flipped_img_channels_first = np.array([ [[16, 17, 18, 19], [20, 21, 22, 23]], [[ 8, 9, 10, 11], [12, 13, 14, 15]], [[ 0, 1, 2, 3], [ 4, 5, 6, 7]], ]) # fmt: on flipped_img_channels_last = np.moveaxis(flipped_img_channels_first, 0, -1) self.assertTrue(np.allclose(flip_channel_order(img_channels_first), flipped_img_channels_first)) self.assertTrue( np.allclose(flip_channel_order(img_channels_first, "channels_last"), flipped_img_channels_last) ) 
self.assertTrue(np.allclose(flip_channel_order(img_channels_last), flipped_img_channels_last)) self.assertTrue( np.allclose(flip_channel_order(img_channels_last, "channels_first"), flipped_img_channels_first) ) # Can flip when the image has 2 channels # fmt: off img_channels_first = np.array([ [[ 0, 1, 2, 3], [ 4, 5, 6, 7]], [[ 8, 9, 10, 11], [12, 13, 14, 15]], ]) # fmt: on flipped_img_channels_first = img_channels_first[::-1, :, :] self.assertTrue( np.allclose( flip_channel_order(img_channels_first, input_data_format="channels_first"), flipped_img_channels_first ) )
transformers/tests/test_image_transforms.py/0
{ "file_path": "transformers/tests/test_image_transforms.py", "repo_id": "transformers", "token_count": 12460 }
479
# coding=utf-8 # Copyright 2020 the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from transformers import ( AutoModelForSeq2SeqLM, BertTokenizer, DataCollatorForSeq2Seq, EncoderDecoderModel, GenerationConfig, Seq2SeqTrainer, Seq2SeqTrainingArguments, T5Tokenizer, ) from transformers.testing_utils import TestCasePlus, require_sentencepiece, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets @require_sentencepiece class Seq2seqTrainerTester(TestCasePlus): @slow @require_torch def test_finetune_bert2bert(self): bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny") tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size bert2bert.config.eos_token_id = tokenizer.sep_token_id bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id bert2bert.config.max_length = 128 train_dataset = datasets.load_dataset("abisee/cnn_dailymail", "3.0.0", split="train[:1%]") val_dataset = datasets.load_dataset("abisee/cnn_dailymail", "3.0.0", split="validation[:1%]") train_dataset = train_dataset.select(range(32)) val_dataset = val_dataset.select(range(16)) batch_size = 4 def _map_to_encoder_decoder_inputs(batch): # Tokenizer will automatically set [BOS] <text> [EOS] inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512) outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128) batch["input_ids"] = inputs.input_ids batch["attention_mask"] = inputs.attention_mask batch["decoder_input_ids"] = outputs.input_ids batch["labels"] = outputs.input_ids.copy() batch["labels"] = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"] ] batch["decoder_attention_mask"] = outputs.attention_mask assert all(len(x) == 512 for x in inputs.input_ids) assert all(len(x) == 128 for x in outputs.input_ids) return batch def _compute_metrics(pred): labels_ids = pred.label_ids pred_ids = pred.predictions # all unnecessary tokens are removed pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True) label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True) accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str) return {"accuracy": accuracy} # map train dataset train_dataset = train_dataset.map( _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], ) train_dataset.set_format( type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], ) # same for validation dataset val_dataset = val_dataset.map( _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], ) val_dataset.set_format( type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", 
"decoder_attention_mask", "labels"], ) output_dir = self.get_auto_remove_tmp_dir() training_args = Seq2SeqTrainingArguments( output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, eval_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, report_to="none", ) # instantiate trainer trainer = Seq2SeqTrainer( model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, ) # start training trainer.train() @slow @require_torch def test_return_sequences(self): # Tests that the number of generated sequences is correct when num_return_sequences > 1 # and essentially ensuring that `accelerator.gather()` is used instead of `gather_for_metrics` INPUT_COLUMN = "question" TARGET_COLUMN = "answer" MAX_INPUT_LENGTH = 256 MAX_TARGET_LENGTH = 256 dataset = datasets.load_dataset("openai/gsm8k", "main", split="train[:38]") model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small") tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, return_tensors="pt", padding="longest") gen_config = GenerationConfig.from_pretrained( "google-t5/t5-small", max_length=None, min_length=None, max_new_tokens=256, min_new_tokens=1, num_beams=5 ) training_args = Seq2SeqTrainingArguments(".", predict_with_generate=True, report_to="none") trainer = Seq2SeqTrainer( model=model, args=training_args, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=lambda x: {"samples": x[0].shape[0]}, ) def prepare_data(examples): # Remove pairs where at least one record is none inputs = examples[INPUT_COLUMN] targets = examples[TARGET_COLUMN] model_inputs = tokenizer(inputs, max_length=MAX_INPUT_LENGTH, truncation=True) labels = tokenizer(text_target=targets, max_length=MAX_TARGET_LENGTH, truncation=True) model_inputs["labels"] = labels["input_ids"] return model_inputs prepared_dataset = dataset.map(prepare_data, batched=True, remove_columns=[INPUT_COLUMN, TARGET_COLUMN]) dataset_len = len(prepared_dataset) # 38 for num_return_sequences in range(3, 0, -1): gen_config.num_return_sequences = num_return_sequences metrics = trainer.evaluate(eval_dataset=prepared_dataset, generation_config=gen_config) assert ( metrics["eval_samples"] == dataset_len * num_return_sequences ), f"Got {metrics['eval_samples']}, expected: {dataset_len * num_return_sequences}" @require_torch def test_bad_generation_config_fail_early(self): # Tests that a bad geneartion config causes the trainer to fail early model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small") tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, return_tensors="pt", padding="longest") gen_config = GenerationConfig(do_sample=False, top_p=0.9) # bad: top_p is not compatible with do_sample=False training_args = Seq2SeqTrainingArguments( ".", predict_with_generate=True, generation_config=gen_config, report_to="none" ) with self.assertRaises(ValueError) as exc: _ = Seq2SeqTrainer( model=model, args=training_args, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=lambda x: {"samples": x[0].shape[0]}, ) self.assertIn("The loaded generation config instance is invalid", str(exc.exception))
transformers/tests/trainer/test_trainer_seq2seq.py/0
{ "file_path": "transformers/tests/trainer/test_trainer_seq2seq.py", "repo_id": "transformers", "token_count": 3782 }
480
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest from transformers.dynamic_module_utils import get_imports TOP_LEVEL_IMPORT = """ import os """ IMPORT_IN_FUNCTION = """ def foo(): import os return False """ DEEPLY_NESTED_IMPORT = """ def foo(): def bar(): if True: import os return False return bar() """ TOP_LEVEL_TRY_IMPORT = """ import os try: import bar except ImportError: raise ValueError() """ TRY_IMPORT_IN_FUNCTION = """ import os def foo(): try: import bar except ImportError: raise ValueError() """ MULTIPLE_EXCEPTS_IMPORT = """ import os try: import bar except (ImportError, AttributeError): raise ValueError() """ EXCEPT_AS_IMPORT = """ import os try: import bar except ImportError as e: raise ValueError() """ GENERIC_EXCEPT_IMPORT = """ import os try: import bar except: raise ValueError() """ MULTILINE_TRY_IMPORT = """ import os try: import bar import baz except ImportError: raise ValueError() """ MULTILINE_BOTH_IMPORT = """ import os try: import bar import baz except ImportError: x = 1 raise ValueError() """ CASES = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize("case", CASES) def test_import_parsing(tmp_path, case): tmp_file_path = os.path.join(tmp_path, "test_file.py") with open(tmp_file_path, "w") as _tmp_file: _tmp_file.write(case) parsed_imports = get_imports(tmp_file_path) assert parsed_imports == ["os"]
transformers/tests/utils/test_dynamic_module_utils.py/0
{ "file_path": "transformers/tests/utils/test_dynamic_module_utils.py", "repo_id": "transformers", "token_count": 918 }
481
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import subprocess
import sys
from typing import Tuple

from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch


class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use the local files cached above
        stdout, _ = self._execute_with_env(load, run, mock, TRANSFORMERS_OFFLINE="1")
        self.assertIn("success", stdout)

    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # should succeed since the files above are now in the local cache
        stdout, _ = self._execute_with_env(load, run, mock)
        self.assertIn("success", stdout)

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import
BertConfig, BertModel, BertTokenizer """ run = """ mname = "hf-internal-testing/tiny-random-bert-sharded" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) print("success") """ mock = """ import socket def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled") socket.socket = offline_socket """ # baseline - just load from_pretrained with normal network # should succeed stdout, _ = self._execute_with_env(load, run) self.assertIn("success", stdout) # next emulate no network # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # self._execute_with_env(load, mock, run, should_fail=True, TRANSFORMERS_OFFLINE="0") # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files stdout, _ = self._execute_with_env(load, mock, run, TRANSFORMERS_OFFLINE="1") self.assertIn("success", stdout) @require_torch def test_offline_mode_pipeline_exception(self): load = """ from transformers import pipeline """ run = """ mname = "hf-internal-testing/tiny-random-bert" pipe = pipeline(model=mname) """ mock = """ import socket def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled") socket.socket = offline_socket """ _, stderr = self._execute_with_env(load, mock, run, should_fail=True, TRANSFORMERS_OFFLINE="1") self.assertIn( "You cannot infer task automatically within `pipeline` when using offline mode", stderr.replace("\n", ""), ) @require_torch def test_offline_model_dynamic_model(self): load = """ from transformers import AutoModel """ run = """ mname = "hf-internal-testing/test_dynamic_model" AutoModel.from_pretrained(mname, trust_remote_code=True) print("success") """ # baseline - just load from_pretrained with normal network # should succeed stdout, _ = self._execute_with_env(load, run) self.assertIn("success", stdout) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files stdout, _ = self._execute_with_env(load, run, TRANSFORMERS_OFFLINE="1") self.assertIn("success", stdout) def test_is_offline_mode(self): """ Test `_is_offline_mode` helper (should respect both HF_HUB_OFFLINE and legacy TRANSFORMERS_OFFLINE env vars) """ load = "from transformers.utils import is_offline_mode" run = "print(is_offline_mode())" stdout, _ = self._execute_with_env(load, run) self.assertIn("False", stdout) stdout, _ = self._execute_with_env(load, run, TRANSFORMERS_OFFLINE="1") self.assertIn("True", stdout) stdout, _ = self._execute_with_env(load, run, HF_HUB_OFFLINE="1") self.assertIn("True", stdout) def _execute_with_env(self, *commands: Tuple[str, ...], should_fail: bool = False, **env) -> Tuple[str, str]: """Execute Python code with a given environment and return the stdout/stderr as strings. If `should_fail=True`, the command is expected to fail. Otherwise, it should succeed. Environment variables can be passed as keyword arguments. """ # Build command cmd = [sys.executable, "-c", "\n".join(commands)] # Configure env new_env = self.get_env() new_env.update(env) # Run command result = subprocess.run(cmd, env=new_env, check=False, capture_output=True) # Check execution if should_fail: self.assertNotEqual(result.returncode, 0, result.stderr) else: self.assertEqual(result.returncode, 0, result.stderr) # Return output return result.stdout.decode(), result.stderr.decode()
transformers/tests/utils/test_offline.py/0
{ "file_path": "transformers/tests/utils/test_offline.py", "repo_id": "transformers", "token_count": 2964 }
482
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility that performs several consistency checks on the repo. This includes:
- checking all models are properly defined in the __init__ of models/
- checking all models are in the main __init__
- checking all models are properly tested
- checking all objects in the main __init__ are documented
- checking all models are in at least one auto class
- checking all the auto mappings are properly defined (no typos, importable)
- checking the list of deprecated models is up to date

Use from the root of the repo with (as used in `make repo-consistency`):

```bash
python utils/check_repo.py
```

It has no auto-fix mode.
"""

import inspect
import os
import re
import sys
import types
import warnings
from collections import OrderedDict
from difflib import get_close_matches
from pathlib import Path
from typing import List, Tuple

from transformers import is_flax_available, is_tf_available, is_torch_available
from transformers.models.auto import get_values
from transformers.models.auto.configuration_auto import CONFIG_MAPPING_NAMES
from transformers.models.auto.feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING_NAMES
from transformers.models.auto.image_processing_auto import IMAGE_PROCESSOR_MAPPING_NAMES
from transformers.models.auto.processing_auto import PROCESSOR_MAPPING_NAMES
from transformers.models.auto.tokenization_auto import TOKENIZER_MAPPING_NAMES
from transformers.utils import ENV_VARS_TRUE_VALUES, direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_repo.py
PATH_TO_TRANSFORMERS = "src/transformers"
PATH_TO_TESTS = "tests"
PATH_TO_DOC = "docs/source/en"

# Update this list with models that are supposed to be private.
PRIVATE_MODELS = [
    "AltRobertaModel",
    "DPRSpanPredictor",
    "UdopStack",
    "LongT5Stack",
    "RealmBertModel",
    "T5Stack",
    "MT5Stack",
    "UMT5Stack",
    "Pop2PianoStack",
    "Qwen2AudioEncoder",
    "Qwen2VisionTransformerPretrainedModel",
    "SwitchTransformersStack",
    "TFDPRSpanPredictor",
    "MaskFormerSwinModel",
    "MaskFormerSwinPreTrainedModel",
    "BridgeTowerTextModel",
    "BridgeTowerVisionModel",
    "Kosmos2TextModel",
    "Kosmos2TextForCausalLM",
    "Kosmos2VisionModel",
    "SeamlessM4Tv2TextToUnitModel",
    "SeamlessM4Tv2CodeHifiGan",
    "SeamlessM4Tv2TextToUnitForConditionalGeneration",
]

# Update this list for models that are not tested with a comment explaining the reason it should not be.
# Being in this list is an exception and should **not** be the rule.
IGNORE_NON_TESTED = (
    PRIVATE_MODELS.copy()
    + [
        # models to ignore for not tested
        "RecurrentGemmaModel",  # Building part of bigger (tested) model.
        "FuyuForCausalLM",  # Not tested for now
        "InstructBlipQFormerModel",  # Building part of bigger (tested) model.
        "InstructBlipVideoQFormerModel",  # Building part of bigger (tested) model.
        "UMT5EncoderModel",  # Building part of bigger (tested) model.
        "Blip2QFormerModel",  # Building part of bigger (tested) model.
"ErnieMForInformationExtraction", "FastSpeech2ConformerHifiGan", # Already tested by SpeechT5HifiGan (# Copied from) "FastSpeech2ConformerWithHifiGan", # Built with two smaller (tested) models. "GraphormerDecoderHead", # Building part of bigger (tested) model. "JukeboxVQVAE", # Building part of bigger (tested) model. "JukeboxPrior", # Building part of bigger (tested) model. "DecisionTransformerGPT2Model", # Building part of bigger (tested) model. "SegformerDecodeHead", # Building part of bigger (tested) model. "MgpstrModel", # Building part of bigger (tested) model. "BertLMHeadModel", # Needs to be setup as decoder. "MegatronBertLMHeadModel", # Building part of bigger (tested) model. "RealmBertModel", # Building part of bigger (tested) model. "RealmReader", # Not regular model. "RealmScorer", # Not regular model. "RealmForOpenQA", # Not regular model. "ReformerForMaskedLM", # Needs to be setup as decoder. "TFElectraMainLayer", # Building part of bigger (tested) model (should it be a TFPreTrainedModel ?) "TFRobertaForMultipleChoice", # TODO: fix "TFRobertaPreLayerNormForMultipleChoice", # TODO: fix "SeparableConv1D", # Building part of bigger (tested) model. "FlaxBartForCausalLM", # Building part of bigger (tested) model. "FlaxBertForCausalLM", # Building part of bigger (tested) model. Tested implicitly through FlaxRobertaForCausalLM. "OPTDecoderWrapper", "TFSegformerDecodeHead", # Not a regular model. "AltRobertaModel", # Building part of bigger (tested) model. "BlipTextLMHeadModel", # No need to test it as it is tested by BlipTextVision models "TFBlipTextLMHeadModel", # No need to test it as it is tested by BlipTextVision models "BridgeTowerTextModel", # No need to test it as it is tested by BridgeTowerModel model. "BridgeTowerVisionModel", # No need to test it as it is tested by BridgeTowerModel model. "BarkCausalModel", # Building part of bigger (tested) model. "BarkModel", # Does not have a forward signature - generation tested with integration tests. "SeamlessM4TTextToUnitModel", # Building part of bigger (tested) model. "SeamlessM4TCodeHifiGan", # Building part of bigger (tested) model. "SeamlessM4TTextToUnitForConditionalGeneration", # Building part of bigger (tested) model. "ChameleonVQVAE", # VQVAE here is used only for encoding (discretizing) and is tested as part of bigger model "Qwen2VLModel", # Building part of bigger (tested) model. Tested implicitly through Qwen2VLForConditionalGeneration. ] ) # Update this list with test files that don't have a tester with a `all_model_classes` variable and which don't # trigger the common tests. 
TEST_FILES_WITH_NO_COMMON_TESTS = [ "models/decision_transformer/test_modeling_decision_transformer.py", "models/camembert/test_modeling_camembert.py", "models/mt5/test_modeling_flax_mt5.py", "models/mbart/test_modeling_mbart.py", "models/mt5/test_modeling_mt5.py", "models/pegasus/test_modeling_pegasus.py", "models/camembert/test_modeling_tf_camembert.py", "models/mt5/test_modeling_tf_mt5.py", "models/xlm_roberta/test_modeling_tf_xlm_roberta.py", "models/xlm_roberta/test_modeling_flax_xlm_roberta.py", "models/xlm_prophetnet/test_modeling_xlm_prophetnet.py", "models/xlm_roberta/test_modeling_xlm_roberta.py", "models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py", "models/vision_text_dual_encoder/test_modeling_tf_vision_text_dual_encoder.py", "models/vision_text_dual_encoder/test_modeling_flax_vision_text_dual_encoder.py", "models/decision_transformer/test_modeling_decision_transformer.py", "models/bark/test_modeling_bark.py", ] # Update this list for models that are not in any of the auto MODEL_XXX_MAPPING. Being in this list is an exception and # should **not** be the rule. IGNORE_NON_AUTO_CONFIGURED = PRIVATE_MODELS.copy() + [ # models to ignore for model xxx mapping "AlignTextModel", "AlignVisionModel", "ClapTextModel", "ClapTextModelWithProjection", "ClapAudioModel", "ClapAudioModelWithProjection", "Blip2ForConditionalGeneration", "Blip2TextModelWithProjection", "Blip2VisionModelWithProjection", "Blip2QFormerModel", "Blip2VisionModel", "ErnieMForInformationExtraction", "FastSpeech2ConformerHifiGan", "FastSpeech2ConformerWithHifiGan", "GitVisionModel", "GraphormerModel", "GraphormerForGraphClassification", "BlipForConditionalGeneration", "BlipForImageTextRetrieval", "BlipForQuestionAnswering", "BlipVisionModel", "BlipTextLMHeadModel", "BlipTextModel", "BrosSpadeEEForTokenClassification", "BrosSpadeELForTokenClassification", "TFBlipForConditionalGeneration", "TFBlipForImageTextRetrieval", "TFBlipForQuestionAnswering", "TFBlipVisionModel", "TFBlipTextLMHeadModel", "TFBlipTextModel", "Swin2SRForImageSuperResolution", "BridgeTowerForImageAndTextRetrieval", "BridgeTowerForMaskedLM", "BridgeTowerForContrastiveLearning", "CLIPSegForImageSegmentation", "CLIPSegVisionModel", "CLIPSegTextModel", "EsmForProteinFolding", "GPTSanJapaneseModel", "TimeSeriesTransformerForPrediction", "InformerForPrediction", "AutoformerForPrediction", "PatchTSTForPretraining", "PatchTSTForPrediction", "JukeboxVQVAE", "JukeboxPrior", "SamModel", "DPTForDepthEstimation", "DecisionTransformerGPT2Model", "GLPNForDepthEstimation", "ViltForImagesAndTextClassification", "ViltForImageAndTextRetrieval", "ViltForTokenClassification", "ViltForMaskedLM", "PerceiverForMultimodalAutoencoding", "PerceiverForOpticalFlow", "SegformerDecodeHead", "TFSegformerDecodeHead", "FlaxBeitForMaskedImageModeling", "BeitForMaskedImageModeling", "ChineseCLIPTextModel", "ChineseCLIPVisionModel", "CLIPTextModel", "CLIPTextModelWithProjection", "CLIPVisionModelWithProjection", "ClvpForCausalLM", "ClvpModel", "GroupViTTextModel", "GroupViTVisionModel", "TFCLIPTextModel", "TFCLIPVisionModel", "TFGroupViTTextModel", "TFGroupViTVisionModel", "FlaxCLIPTextModel", "FlaxCLIPTextModelWithProjection", "FlaxCLIPVisionModel", "FlaxWav2Vec2ForCTC", "DetrForSegmentation", "Pix2StructVisionModel", "Pix2StructTextModel", "Pix2StructForConditionalGeneration", "ConditionalDetrForSegmentation", "DPRReader", "FlaubertForQuestionAnswering", "FlavaImageCodebook", "FlavaTextModel", "FlavaImageModel", "FlavaMultimodalModel", "GPT2DoubleHeadsModel", 
"GPTSw3DoubleHeadsModel", "InstructBlipVisionModel", "InstructBlipQFormerModel", "InstructBlipVideoVisionModel", "InstructBlipVideoQFormerModel", "LayoutLMForQuestionAnswering", "LukeForMaskedLM", "LukeForEntityClassification", "LukeForEntityPairClassification", "LukeForEntitySpanClassification", "MgpstrModel", "OpenAIGPTDoubleHeadsModel", "OwlViTTextModel", "OwlViTVisionModel", "Owlv2TextModel", "Owlv2VisionModel", "OwlViTForObjectDetection", "PatchTSMixerForPrediction", "PatchTSMixerForPretraining", "RagModel", "RagSequenceForGeneration", "RagTokenForGeneration", "RealmEmbedder", "RealmForOpenQA", "RealmScorer", "RealmReader", "TFDPRReader", "TFGPT2DoubleHeadsModel", "TFLayoutLMForQuestionAnswering", "TFOpenAIGPTDoubleHeadsModel", "TFRagModel", "TFRagSequenceForGeneration", "TFRagTokenForGeneration", "Wav2Vec2ForCTC", "HubertForCTC", "SEWForCTC", "SEWDForCTC", "XLMForQuestionAnswering", "XLNetForQuestionAnswering", "SeparableConv1D", "VisualBertForRegionToPhraseAlignment", "VisualBertForVisualReasoning", "VisualBertForQuestionAnswering", "VisualBertForMultipleChoice", "TFWav2Vec2ForCTC", "TFHubertForCTC", "XCLIPVisionModel", "XCLIPTextModel", "AltCLIPTextModel", "AltCLIPVisionModel", "AltRobertaModel", "TvltForAudioVisualClassification", "BarkCausalModel", "BarkCoarseModel", "BarkFineModel", "BarkSemanticModel", "MusicgenMelodyModel", "MusicgenModel", "MusicgenForConditionalGeneration", "SpeechT5ForSpeechToSpeech", "SpeechT5ForTextToSpeech", "SpeechT5HifiGan", "VitMatteForImageMatting", "SeamlessM4TTextToUnitModel", "SeamlessM4TTextToUnitForConditionalGeneration", "SeamlessM4TCodeHifiGan", "SeamlessM4TForSpeechToSpeech", # no auto class for speech-to-speech "TvpForVideoGrounding", "UdopForConditionalGeneration", "SeamlessM4Tv2NARTextToUnitModel", "SeamlessM4Tv2NARTextToUnitForConditionalGeneration", "SeamlessM4Tv2CodeHifiGan", "SeamlessM4Tv2ForSpeechToSpeech", # no auto class for speech-to-speech "SegGptForImageSegmentation", "SiglipVisionModel", "SiglipTextModel", "ChameleonVQVAE", # no autoclass for VQ-VAE models ] # DO NOT edit this list! # (The corresponding pytorch objects should never have been in the main `__init__`, but it's too late to remove) OBJECT_TO_SKIP_IN_MAIN_INIT_CHECK = [ "FlaxBertLayer", "FlaxBigBirdLayer", "FlaxRoFormerLayer", "TFBertLayer", "TFLxmertEncoder", "TFLxmertXLayer", "TFMPNetLayer", "TFMobileBertLayer", "TFSegformerLayer", "TFViTMAELayer", ] # Update this list for models that have multiple model types for the same model doc. MODEL_TYPE_TO_DOC_MAPPING = OrderedDict( [ ("data2vec-text", "data2vec"), ("data2vec-audio", "data2vec"), ("data2vec-vision", "data2vec"), ("donut-swin", "donut"), ] ) # This is to make sure the transformers module imported is the one in the repo. transformers = direct_transformers_import(PATH_TO_TRANSFORMERS) def check_missing_backends(): """ Checks if all backends are installed (otherwise the check of this script is incomplete). Will error in the CI if that's not the case but only throw a warning for users running this. 
""" missing_backends = [] if not is_torch_available(): missing_backends.append("PyTorch") if not is_tf_available(): missing_backends.append("TensorFlow") if not is_flax_available(): missing_backends.append("Flax") if len(missing_backends) > 0: missing = ", ".join(missing_backends) if os.getenv("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES: raise Exception( "Full repo consistency checks require all backends to be installed (with `pip install -e '.[dev]'` in the " f"Transformers repo, the following are missing: {missing}." ) else: warnings.warn( "Full repo consistency checks require all backends to be installed (with `pip install -e '.[dev]'` in the " f"Transformers repo, the following are missing: {missing}. While it's probably fine as long as you " "didn't make any change in one of those backends modeling files, you should probably execute the " "command above to be on the safe side." ) def check_model_list(): """ Checks the model listed as subfolders of `models` match the models available in `transformers.models`. """ # Get the models from the directory structure of `src/transformers/models/` models_dir = os.path.join(PATH_TO_TRANSFORMERS, "models") _models = [] for model in os.listdir(models_dir): if model == "deprecated": continue model_dir = os.path.join(models_dir, model) if os.path.isdir(model_dir) and "__init__.py" in os.listdir(model_dir): _models.append(model) # Get the models in the submodule `transformers.models` models = [model for model in dir(transformers.models) if not model.startswith("__")] missing_models = sorted(set(_models).difference(models)) if missing_models: raise Exception( f"The following models should be included in {models_dir}/__init__.py: {','.join(missing_models)}." ) # If some modeling modules should be ignored for all checks, they should be added in the nested list # _ignore_modules of this function. def get_model_modules() -> List[str]: """Get all the model modules inside the transformers library (except deprecated models).""" _ignore_modules = [ "modeling_auto", "modeling_encoder_decoder", "modeling_marian", "modeling_mmbt", "modeling_outputs", "modeling_retribert", "modeling_utils", "modeling_flax_auto", "modeling_flax_encoder_decoder", "modeling_flax_utils", "modeling_speech_encoder_decoder", "modeling_flax_speech_encoder_decoder", "modeling_flax_vision_encoder_decoder", "modeling_timm_backbone", "modeling_tf_auto", "modeling_tf_encoder_decoder", "modeling_tf_outputs", "modeling_tf_pytorch_utils", "modeling_tf_utils", "modeling_tf_vision_encoder_decoder", "modeling_vision_encoder_decoder", ] modules = [] for model in dir(transformers.models): # There are some magic dunder attributes in the dir, we ignore them if model == "deprecated" or model.startswith("__"): continue model_module = getattr(transformers.models, model) for submodule in dir(model_module): if submodule.startswith("modeling") and submodule not in _ignore_modules: modeling_module = getattr(model_module, submodule) if inspect.ismodule(modeling_module): modules.append(modeling_module) return modules def get_models(module: types.ModuleType, include_pretrained: bool = False) -> List[Tuple[str, type]]: """ Get the objects in a module that are models. Args: module (`types.ModuleType`): The module from which we are extracting models. include_pretrained (`bool`, *optional*, defaults to `False`): Whether or not to include the `PreTrainedModel` subclass (like `BertPreTrainedModel`) or not. Returns: List[Tuple[str, type]]: List of models as tuples (class name, actual class). 
""" models = [] model_classes = (transformers.PreTrainedModel, transformers.TFPreTrainedModel, transformers.FlaxPreTrainedModel) for attr_name in dir(module): if not include_pretrained and ("Pretrained" in attr_name or "PreTrained" in attr_name): continue attr = getattr(module, attr_name) if isinstance(attr, type) and issubclass(attr, model_classes) and attr.__module__ == module.__name__: models.append((attr_name, attr)) return models def is_building_block(model: str) -> bool: """ Returns `True` if a model is a building block part of a bigger model. """ if model.endswith("Wrapper"): return True if model.endswith("Encoder"): return True if model.endswith("Decoder"): return True if model.endswith("Prenet"): return True def is_a_private_model(model: str) -> bool: """Returns `True` if the model should not be in the main init.""" if model in PRIVATE_MODELS: return True return is_building_block(model) def check_models_are_in_init(): """Checks all models defined in the library are in the main init.""" models_not_in_init = [] dir_transformers = dir(transformers) for module in get_model_modules(): models_not_in_init += [ model[0] for model in get_models(module, include_pretrained=True) if model[0] not in dir_transformers ] # Remove private models models_not_in_init = [model for model in models_not_in_init if not is_a_private_model(model)] if len(models_not_in_init) > 0: raise Exception(f"The following models should be in the main init: {','.join(models_not_in_init)}.") # If some test_modeling files should be ignored when checking models are all tested, they should be added in the # nested list _ignore_files of this function. def get_model_test_files() -> List[str]: """ Get the model test files. Returns: `List[str]`: The list of test files. The returned files will NOT contain the `tests` (i.e. `PATH_TO_TESTS` defined in this script). They will be considered as paths relative to `tests`. A caller has to use `os.path.join(PATH_TO_TESTS, ...)` to access the files. """ _ignore_files = [ "test_modeling_common", "test_modeling_encoder_decoder", "test_modeling_flax_encoder_decoder", "test_modeling_flax_speech_encoder_decoder", "test_modeling_marian", "test_modeling_tf_common", "test_modeling_tf_encoder_decoder", ] test_files = [] model_test_root = os.path.join(PATH_TO_TESTS, "models") model_test_dirs = [] for x in os.listdir(model_test_root): x = os.path.join(model_test_root, x) if os.path.isdir(x): model_test_dirs.append(x) for target_dir in [PATH_TO_TESTS] + model_test_dirs: for file_or_dir in os.listdir(target_dir): path = os.path.join(target_dir, file_or_dir) if os.path.isfile(path): filename = os.path.split(path)[-1] if "test_modeling" in filename and os.path.splitext(filename)[0] not in _ignore_files: file = os.path.join(*path.split(os.sep)[1:]) test_files.append(file) return test_files # This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the tester class # for the all_model_classes variable. def find_tested_models(test_file: str) -> List[str]: """ Parse the content of test_file to detect what's in `all_model_classes`. This detects the models that inherit from the common test class. Args: test_file (`str`): The path to the test file to check Returns: `List[str]`: The list of models tested in that file. 
""" with open(os.path.join(PATH_TO_TESTS, test_file), "r", encoding="utf-8", newline="\n") as f: content = f.read() all_models = re.findall(r"all_model_classes\s+=\s+\(\s*\(([^\)]*)\)", content) # Check with one less parenthesis as well all_models += re.findall(r"all_model_classes\s+=\s+\(([^\)]*)\)", content) if len(all_models) > 0: model_tested = [] for entry in all_models: for line in entry.split(","): name = line.strip() if len(name) > 0: model_tested.append(name) return model_tested def should_be_tested(model_name: str) -> bool: """ Whether or not a model should be tested. """ if model_name in IGNORE_NON_TESTED: return False return not is_building_block(model_name) def check_models_are_tested(module: types.ModuleType, test_file: str) -> List[str]: """Check models defined in a module are all tested in a given file. Args: module (`types.ModuleType`): The module in which we get the models. test_file (`str`): The path to the file where the module is tested. Returns: `List[str]`: The list of error messages corresponding to models not tested. """ # XxxPreTrainedModel are not tested defined_models = get_models(module) tested_models = find_tested_models(test_file) if tested_models is None: if test_file.replace(os.path.sep, "/") in TEST_FILES_WITH_NO_COMMON_TESTS: return return [ f"{test_file} should define `all_model_classes` to apply common tests to the models it tests. " + "If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file " + "`utils/check_repo.py`." ] failures = [] for model_name, _ in defined_models: if model_name not in tested_models and should_be_tested(model_name): failures.append( f"{model_name} is defined in {module.__name__} but is not tested in " + f"{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file." + "If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED`" + "in the file `utils/check_repo.py`." ) return failures def check_all_models_are_tested(): """Check all models are properly tested.""" modules = get_model_modules() test_files = get_model_test_files() failures = [] for module in modules: # Matches a module to its test file. test_file = [file for file in test_files if f"test_{module.__name__.split('.')[-1]}.py" in file] if len(test_file) == 0: failures.append(f"{module.__name__} does not have its corresponding test file {test_file}.") elif len(test_file) > 1: failures.append(f"{module.__name__} has several test files: {test_file}.") else: test_file = test_file[0] new_failures = check_models_are_tested(module, test_file) if new_failures is not None: failures += new_failures if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) def get_all_auto_configured_models() -> List[str]: """Return the list of all models in at least one auto class.""" result = set() # To avoid duplicates we concatenate all model classes in a set. 
if is_torch_available(): for attr_name in dir(transformers.models.auto.modeling_auto): if attr_name.startswith("MODEL_") and attr_name.endswith("MAPPING_NAMES"): result = result | set(get_values(getattr(transformers.models.auto.modeling_auto, attr_name))) if is_tf_available(): for attr_name in dir(transformers.models.auto.modeling_tf_auto): if attr_name.startswith("TF_MODEL_") and attr_name.endswith("MAPPING_NAMES"): result = result | set(get_values(getattr(transformers.models.auto.modeling_tf_auto, attr_name))) if is_flax_available(): for attr_name in dir(transformers.models.auto.modeling_flax_auto): if attr_name.startswith("FLAX_MODEL_") and attr_name.endswith("MAPPING_NAMES"): result = result | set(get_values(getattr(transformers.models.auto.modeling_flax_auto, attr_name))) return list(result) def ignore_unautoclassed(model_name: str) -> bool: """Rules to determine if a model should be in an auto class.""" # Special white list if model_name in IGNORE_NON_AUTO_CONFIGURED: return True # Encoder and Decoder should be ignored if "Encoder" in model_name or "Decoder" in model_name: return True return False def check_models_are_auto_configured(module: types.ModuleType, all_auto_models: List[str]) -> List[str]: """ Check models defined in module are each in an auto class. Args: module (`types.ModuleType`): The module in which we get the models. all_auto_models (`List[str]`): The list of all models in an auto class (as obtained with `get_all_auto_configured_models()`). Returns: `List[str]`: The list of error messages corresponding to models not tested. """ defined_models = get_models(module) failures = [] for model_name, _ in defined_models: if model_name not in all_auto_models and not ignore_unautoclassed(model_name): failures.append( f"{model_name} is defined in {module.__name__} but is not present in any of the auto mapping. " "If that is intended behavior, add its name to `IGNORE_NON_AUTO_CONFIGURED` in the file " "`utils/check_repo.py`." ) return failures def check_all_models_are_auto_configured(): """Check all models are each in an auto class.""" # This is where we need to check we have all backends or the check is incomplete. check_missing_backends() modules = get_model_modules() all_auto_models = get_all_auto_configured_models() failures = [] for module in modules: new_failures = check_models_are_auto_configured(module, all_auto_models) if new_failures is not None: failures += new_failures if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) def check_all_auto_object_names_being_defined(): """Check all names defined in auto (name) mappings exist in the library.""" # This is where we need to check we have all backends or the check is incomplete. check_missing_backends() failures = [] mappings_to_check = { "TOKENIZER_MAPPING_NAMES": TOKENIZER_MAPPING_NAMES, "IMAGE_PROCESSOR_MAPPING_NAMES": IMAGE_PROCESSOR_MAPPING_NAMES, "FEATURE_EXTRACTOR_MAPPING_NAMES": FEATURE_EXTRACTOR_MAPPING_NAMES, "PROCESSOR_MAPPING_NAMES": PROCESSOR_MAPPING_NAMES, } # Each auto modeling files contains multiple mappings. Let's get them in a dynamic way. 
for module_name in ["modeling_auto", "modeling_tf_auto", "modeling_flax_auto"]: module = getattr(transformers.models.auto, module_name, None) if module is None: continue # all mappings in a single auto modeling file mapping_names = [x for x in dir(module) if x.endswith("_MAPPING_NAMES")] mappings_to_check.update({name: getattr(module, name) for name in mapping_names}) for name, mapping in mappings_to_check.items(): for _, class_names in mapping.items(): if not isinstance(class_names, tuple): class_names = (class_names,) for class_name in class_names: if class_name is None: continue # dummy object is accepted if not hasattr(transformers, class_name): # If the class name is in a model name mapping, let's not check if there is a definition in any modeling # module, if it's a private model defined in this file. if name.endswith("MODEL_MAPPING_NAMES") and is_a_private_model(class_name): continue if name.endswith("MODEL_FOR_IMAGE_MAPPING_NAMES") and is_a_private_model(class_name): continue failures.append( f"`{class_name}` appears in the mapping `{name}` but it is not defined in the library." ) if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) def check_all_auto_mapping_names_in_config_mapping_names(): """Check all keys defined in auto mappings (mappings of names) appear in `CONFIG_MAPPING_NAMES`.""" # This is where we need to check we have all backends or the check is incomplete. check_missing_backends() failures = [] # `TOKENIZER_PROCESSOR_MAPPING_NAMES` and `AutoTokenizer` is special, and don't need to follow the rule. mappings_to_check = { "IMAGE_PROCESSOR_MAPPING_NAMES": IMAGE_PROCESSOR_MAPPING_NAMES, "FEATURE_EXTRACTOR_MAPPING_NAMES": FEATURE_EXTRACTOR_MAPPING_NAMES, "PROCESSOR_MAPPING_NAMES": PROCESSOR_MAPPING_NAMES, } # Each auto modeling files contains multiple mappings. Let's get them in a dynamic way. for module_name in ["modeling_auto", "modeling_tf_auto", "modeling_flax_auto"]: module = getattr(transformers.models.auto, module_name, None) if module is None: continue # all mappings in a single auto modeling file mapping_names = [x for x in dir(module) if x.endswith("_MAPPING_NAMES")] mappings_to_check.update({name: getattr(module, name) for name in mapping_names}) for name, mapping in mappings_to_check.items(): for model_type in mapping: if model_type not in CONFIG_MAPPING_NAMES: failures.append( f"`{model_type}` appears in the mapping `{name}` but it is not defined in the keys of " "`CONFIG_MAPPING_NAMES`." ) if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) def check_all_auto_mappings_importable(): """Check all auto mappings can be imported.""" # This is where we need to check we have all backends or the check is incomplete. check_missing_backends() failures = [] mappings_to_check = {} # Each auto modeling files contains multiple mappings. Let's get them in a dynamic way. 
    for module_name in ["modeling_auto", "modeling_tf_auto", "modeling_flax_auto"]:
        module = getattr(transformers.models.auto, module_name, None)
        if module is None:
            continue
        # all mappings in a single auto modeling file
        mapping_names = [x for x in dir(module) if x.endswith("_MAPPING_NAMES")]
        mappings_to_check.update({name: getattr(module, name) for name in mapping_names})

    for name in mappings_to_check:
        name = name.replace("_MAPPING_NAMES", "_MAPPING")
        if not hasattr(transformers, name):
            failures.append(f"`{name}`")
    if len(failures) > 0:
        raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))


def check_objects_being_equally_in_main_init():
    """
    Check if a (TensorFlow or Flax) object is in the main __init__ if and only if its counterpart in PyTorch is.
    """
    attrs = dir(transformers)
    failures = []
    for attr in attrs:
        obj = getattr(transformers, attr)
        if not hasattr(obj, "__module__") or "models.deprecated" in obj.__module__:
            continue
        module_path = obj.__module__
        module_name = module_path.split(".")[-1]
        module_dir = ".".join(module_path.split(".")[:-1])
        if (
            module_name.startswith("modeling_")
            and not module_name.startswith("modeling_tf_")
            and not module_name.startswith("modeling_flax_")
        ):
            parent_module = sys.modules[module_dir]

            frameworks = []
            if is_tf_available():
                frameworks.append("TF")
            if is_flax_available():
                frameworks.append("Flax")

            for framework in frameworks:
                other_module_path = module_path.replace("modeling_", f"modeling_{framework.lower()}_")
                if os.path.isfile("src/" + other_module_path.replace(".", "/") + ".py"):
                    other_module_name = module_name.replace("modeling_", f"modeling_{framework.lower()}_")
                    other_module = getattr(parent_module, other_module_name)
                    if hasattr(other_module, f"{framework}{attr}"):
                        if not hasattr(transformers, f"{framework}{attr}"):
                            if f"{framework}{attr}" not in OBJECT_TO_SKIP_IN_MAIN_INIT_CHECK:
                                failures.append(f"{framework}{attr}")
                    if hasattr(other_module, f"{framework}_{attr}"):
                        if not hasattr(transformers, f"{framework}_{attr}"):
                            if f"{framework}_{attr}" not in OBJECT_TO_SKIP_IN_MAIN_INIT_CHECK:
                                failures.append(f"{framework}_{attr}")
    if len(failures) > 0:
        raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))


_re_decorator = re.compile(r"^\s*@(\S+)\s+$")


def check_decorator_order(filename: str) -> List[int]:
    """
    Check that in a given test file, the slow decorator is always last.

    Args:
        filename (`str`): The path to a test file to check.

    Returns:
        `List[int]`: The list of failures as a list of indices where there are problems.
""" with open(filename, "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() decorator_before = None errors = [] for i, line in enumerate(lines): search = _re_decorator.search(line) if search is not None: decorator_name = search.groups()[0] if decorator_before is not None and decorator_name.startswith("parameterized"): errors.append(i) decorator_before = decorator_name elif decorator_before is not None: decorator_before = None return errors def check_all_decorator_order(): """Check that in all test files, the slow decorator is always last.""" errors = [] for fname in os.listdir(PATH_TO_TESTS): if fname.endswith(".py"): filename = os.path.join(PATH_TO_TESTS, fname) new_errors = check_decorator_order(filename) errors += [f"- {filename}, line {i}" for i in new_errors] if len(errors) > 0: msg = "\n".join(errors) raise ValueError( "The parameterized decorator (and its variants) should always be first, but this is not the case in the" f" following files:\n{msg}" ) def find_all_documented_objects() -> List[str]: """ Parse the content of all doc files to detect which classes and functions it documents. Returns: `List[str]`: The list of all object names being documented. """ documented_obj = [] for doc_file in Path(PATH_TO_DOC).glob("**/*.rst"): with open(doc_file, "r", encoding="utf-8", newline="\n") as f: content = f.read() raw_doc_objs = re.findall(r"(?:autoclass|autofunction):: transformers.(\S+)\s+", content) documented_obj += [obj.split(".")[-1] for obj in raw_doc_objs] for doc_file in Path(PATH_TO_DOC).glob("**/*.md"): with open(doc_file, "r", encoding="utf-8", newline="\n") as f: content = f.read() raw_doc_objs = re.findall(r"\[\[autodoc\]\]\s+(\S+)\s+", content) documented_obj += [obj.split(".")[-1] for obj in raw_doc_objs] return documented_obj # One good reason for not being documented is to be deprecated. Put in this list deprecated objects. DEPRECATED_OBJECTS = [ "AutoModelWithLMHead", "BartPretrainedModel", "DataCollator", "DataCollatorForSOP", "GlueDataset", "GlueDataTrainingArguments", "LineByLineTextDataset", "LineByLineWithRefDataset", "LineByLineWithSOPTextDataset", "LogitsWarper", "NerPipeline", "PretrainedBartModel", "PretrainedFSMTModel", "SingleSentenceClassificationProcessor", "SquadDataTrainingArguments", "SquadDataset", "SquadExample", "SquadFeatures", "SquadV1Processor", "SquadV2Processor", "TFAutoModelWithLMHead", "TFBartPretrainedModel", "TextDataset", "TextDatasetForNextSentencePrediction", "Wav2Vec2ForMaskedLM", "Wav2Vec2Tokenizer", "glue_compute_metrics", "glue_convert_examples_to_features", "glue_output_modes", "glue_processors", "glue_tasks_num_labels", "squad_convert_examples_to_features", "xnli_compute_metrics", "xnli_output_modes", "xnli_processors", "xnli_tasks_num_labels", "TFTrainingArguments", ] # Exceptionally, some objects should not be documented after all rules passed. # ONLY PUT SOMETHING IN THIS LIST AS A LAST RESORT! UNDOCUMENTED_OBJECTS = [ "AddedToken", # This is a tokenizers class. "BasicTokenizer", # Internal, should never have been in the main init. "CharacterTokenizer", # Internal, should never have been in the main init. "DPRPretrainedReader", # Like an Encoder. "DummyObject", # Just picked by mistake sometimes. "MecabTokenizer", # Internal, should never have been in the main init. "ModelCard", # Internal type. "SqueezeBertModule", # Internal building block (should have been called SqueezeBertLayer) "TFDPRPretrainedReader", # Like an Encoder. "TransfoXLCorpus", # Internal type. 
"WordpieceTokenizer", # Internal, should never have been in the main init. "absl", # External module "add_end_docstrings", # Internal, should never have been in the main init. "add_start_docstrings", # Internal, should never have been in the main init. "convert_tf_weight_name_to_pt_weight_name", # Internal used to convert model weights "logger", # Internal logger "logging", # External module "requires_backends", # Internal function "AltRobertaModel", # Internal module ] # This list should be empty. Objects in it should get their own doc page. SHOULD_HAVE_THEIR_OWN_PAGE = [ # Benchmarks "PyTorchBenchmark", "PyTorchBenchmarkArguments", "TensorFlowBenchmark", "TensorFlowBenchmarkArguments", "AutoBackbone", "BeitBackbone", "BitBackbone", "ConvNextBackbone", "ConvNextV2Backbone", "DinatBackbone", "Dinov2Backbone", "FocalNetBackbone", "HieraBackbone", "MaskFormerSwinBackbone", "MaskFormerSwinConfig", "MaskFormerSwinModel", "NatBackbone", "PvtV2Backbone", "ResNetBackbone", "SwinBackbone", "Swinv2Backbone", "TimmBackbone", "TimmBackboneConfig", "VitDetBackbone", ] def ignore_undocumented(name: str) -> bool: """Rules to determine if `name` should be undocumented (returns `True` if it should not be documented).""" # NOT DOCUMENTED ON PURPOSE. # Constants uppercase are not documented. if name.isupper(): return True # PreTrainedModels / Encoders / Decoders / Layers / Embeddings / Attention are not documented. if ( name.endswith("PreTrainedModel") or name.endswith("Decoder") or name.endswith("Encoder") or name.endswith("Layer") or name.endswith("Embeddings") or name.endswith("Attention") ): return True # Submodules are not documented. if os.path.isdir(os.path.join(PATH_TO_TRANSFORMERS, name)) or os.path.isfile( os.path.join(PATH_TO_TRANSFORMERS, f"{name}.py") ): return True # All load functions are not documented. if name.startswith("load_tf") or name.startswith("load_pytorch"): return True # is_xxx_available functions are not documented. if name.startswith("is_") and name.endswith("_available"): return True # Deprecated objects are not documented. if name in DEPRECATED_OBJECTS or name in UNDOCUMENTED_OBJECTS: return True # MMBT model does not really work. if name.startswith("MMBT"): return True if name in SHOULD_HAVE_THEIR_OWN_PAGE: return True return False def check_all_objects_are_documented(): """Check all models are properly documented.""" documented_objs = find_all_documented_objects() modules = transformers._modules objects = [c for c in dir(transformers) if c not in modules and not c.startswith("_")] undocumented_objs = [c for c in objects if c not in documented_objs and not ignore_undocumented(c)] if len(undocumented_objs) > 0: raise Exception( "The following objects are in the public init so should be documented:\n - " + "\n - ".join(undocumented_objs) ) check_docstrings_are_in_md() check_model_type_doc_match() def check_model_type_doc_match(): """Check all doc pages have a corresponding model type.""" model_doc_folder = Path(PATH_TO_DOC) / "model_doc" model_docs = [m.stem for m in model_doc_folder.glob("*.md")] model_types = list(transformers.models.auto.configuration_auto.MODEL_NAMES_MAPPING.keys()) model_types = [MODEL_TYPE_TO_DOC_MAPPING[m] if m in MODEL_TYPE_TO_DOC_MAPPING else m for m in model_types] errors = [] for m in model_docs: if m not in model_types and m != "auto": close_matches = get_close_matches(m, model_types) error_message = f"{m} is not a proper model identifier." 
        if len(close_matches) > 0:
            close_matches = "/".join(close_matches)
            error_message += f" Did you mean {close_matches}?"
            errors.append(error_message)

    if len(errors) > 0:
        raise ValueError(
            "Some model doc pages do not match any existing model type:\n"
            + "\n".join(errors)
            + "\nYou can add any missing model type to the `MODEL_NAMES_MAPPING` constant in "
            "models/auto/configuration_auto.py."
        )


# Re pattern to catch :obj:`xx`, :class:`xx`, :func:`xx` or :meth:`xx`.
_re_rst_special_words = re.compile(r":(?:obj|func|class|meth):`([^`]+)`")
# Re pattern to catch things between double backquotes.
_re_double_backquotes = re.compile(r"(^|[^`])``([^`]+)``([^`]|$)")
# Re pattern to catch example introduction.
_re_rst_example = re.compile(r"^\s*Example.*::\s*$", flags=re.MULTILINE)


def is_rst_docstring(docstring: str) -> bool:
    """
    Returns `True` if `docstring` is written in rst.
    """
    if _re_rst_special_words.search(docstring) is not None:
        return True
    if _re_double_backquotes.search(docstring) is not None:
        return True
    if _re_rst_example.search(docstring) is not None:
        return True
    return False


def check_docstrings_are_in_md():
    """Check all docstrings are written in md and not rst."""
    files_with_rst = []
    for file in Path(PATH_TO_TRANSFORMERS).glob("**/*.py"):
        with open(file, encoding="utf-8") as f:
            code = f.read()
        docstrings = code.split('"""')
        for idx, docstring in enumerate(docstrings):
            if idx % 2 == 0 or not is_rst_docstring(docstring):
                continue
            files_with_rst.append(file)
            break
    if len(files_with_rst) > 0:
        raise ValueError(
            "The following files have docstrings written in rst:\n"
            + "\n".join([f"- {f}" for f in files_with_rst])
            + "\nTo fix this run `doc-builder convert path_to_py_file` after installing `doc-builder`\n"
            "(`pip install git+https://github.com/huggingface/doc-builder`)"
        )


def check_deprecated_constant_is_up_to_date():
    """
    Check if the constant `DEPRECATED_MODELS` in `models/auto/configuration_auto.py` is up to date.
    """
    deprecated_folder = os.path.join(PATH_TO_TRANSFORMERS, "models", "deprecated")
    deprecated_models = [m for m in os.listdir(deprecated_folder) if not m.startswith("_")]

    constant_to_check = transformers.models.auto.configuration_auto.DEPRECATED_MODELS
    message = []
    missing_models = sorted(set(deprecated_models) - set(constant_to_check))
    if len(missing_models) != 0:
        missing_models = ", ".join(missing_models)
        message.append(
            "The following models are in the deprecated folder, make sure to add them to `DEPRECATED_MODELS` in "
            f"`models/auto/configuration_auto.py`: {missing_models}."
        )

    extra_models = sorted(set(constant_to_check) - set(deprecated_models))
    if len(extra_models) != 0:
        extra_models = ", ".join(extra_models)
        message.append(
            "The following models are in the `DEPRECATED_MODELS` constant but not in the deprecated folder. Either "
            f"remove them from the constant or move to the deprecated folder: {extra_models}."
) if len(message) > 0: raise Exception("\n".join(message)) def check_repo_quality(): """Check all models are properly tested and documented.""" print("Checking all models are included.") check_model_list() print("Checking all models are public.") check_models_are_in_init() print("Checking all models are properly tested.") check_all_decorator_order() check_all_models_are_tested() print("Checking all objects are properly documented.") check_all_objects_are_documented() print("Checking all models are in at least one auto class.") check_all_models_are_auto_configured() print("Checking all names in auto name mappings are defined.") check_all_auto_object_names_being_defined() print("Checking all keys in auto name mappings are defined in `CONFIG_MAPPING_NAMES`.") check_all_auto_mapping_names_in_config_mapping_names() print("Checking all auto mappings could be imported.") check_all_auto_mappings_importable() print("Checking all objects are equally (across frameworks) in the main __init__.") check_objects_being_equally_in_main_init() print("Checking the DEPRECATED_MODELS constant is up to date.") check_deprecated_constant_is_up_to_date() if __name__ == "__main__": check_repo_quality()
transformers/utils/check_repo.py/0
{ "file_path": "transformers/utils/check_repo.py", "repo_id": "transformers", "token_count": 19844 }
483
# hello world experiment
python benchmark/benchmark.py \
    --command "python examples/scripts/ppo.py --log_with wandb" \
    --num-seeds 3 \
    --start-seed 1 \
    --workers 10 \
    --slurm-nodes 1 \
    --slurm-gpus-per-task 1 \
    --slurm-ntasks 1 \
    --slurm-total-cpus 12 \
    --slurm-template-path benchmark/trl.slurm_template

# dpo experiment
python benchmark/benchmark.py \
    --command "python examples/scripts/dpo.py --model_name_or_path=gpt2 --per_device_train_batch_size 4 --max_steps 1000 --learning_rate 1e-3 --gradient_accumulation_steps 1 --logging_steps 10 --eval_steps 500 --output_dir="dpo_anthropic_hh" --optim adamw_torch --warmup_steps 150 --report_to wandb --bf16 --logging_first_step --no_remove_unused_columns" \
    --num-seeds 3 \
    --start-seed 1 \
    --workers 10 \
    --slurm-nodes 1 \
    --slurm-gpus-per-task 1 \
    --slurm-ntasks 1 \
    --slurm-total-cpus 12 \
    --slurm-template-path benchmark/trl.slurm_template

# sft experiment
python benchmark/benchmark.py \
    --command "python examples/scripts/sft.py --model_name_or_path="facebook/opt-350m" --report_to="wandb" --learning_rate=1.41e-5 --per_device_train_batch_size=64 --gradient_accumulation_steps=16 --output_dir="sft_openassistant-guanaco" --logging_steps=1 --num_train_epochs=3 --max_steps=-1 --push_to_hub --gradient_checkpointing" \
    --num-seeds 3 \
    --start-seed 1 \
    --workers 10 \
    --slurm-nodes 1 \
    --slurm-gpus-per-task 1 \
    --slurm-ntasks 1 \
    --slurm-total-cpus 12 \
    --slurm-template-path benchmark/trl.slurm_template

# reward modeling experiment
python benchmark/benchmark.py \
    --command "python examples/scripts/reward_modeling.py --model_name_or_path=facebook/opt-350m --output_dir="reward_modeling_anthropic_hh" --per_device_train_batch_size=64 --num_train_epochs=1 --gradient_accumulation_steps=16 --gradient_checkpointing=True --learning_rate=1.41e-5 --report_to="wandb" --remove_unused_columns=False --optim="adamw_torch" --logging_steps=10 --eval_strategy="steps" --max_length=512" \
    --num-seeds 3 \
    --start-seed 1 \
    --workers 10 \
    --slurm-nodes 1 \
    --slurm-gpus-per-task 1 \
    --slurm-ntasks 1 \
    --slurm-total-cpus 12 \
    --slurm-template-path benchmark/trl.slurm_template
trl/benchmark/benchmark_level1.sh/0
{ "file_path": "trl/benchmark/benchmark_level1.sh", "repo_id": "trl", "token_count": 905 }
484
# Aligning Text-to-Image Diffusion Models with Reward Backpropagation

## The why

If your reward function is differentiable, directly backpropagating gradients from the reward model to the diffusion model is significantly more sample- and compute-efficient (25x) than using a policy gradient algorithm such as DDPO.
AlignProp does full backpropagation through time, which allows updating the earlier steps of denoising via reward backpropagation.

<div style="text-align: center"><img src="https://align-prop.github.io/reward_tuning.png"/></div>


## Getting started with `examples/scripts/alignprop.py`

The `alignprop.py` script is a working example of using the `AlignProp` trainer to finetune a Stable Diffusion model. This example explicitly configures a small subset of the overall parameters associated with the config object (`AlignPropConfig`).

**Note:** one A100 GPU is recommended to get this running. For a lower-memory setting, consider setting `truncated_backprop_rand` to `False`. With default settings this will do truncated backpropagation with K=1.

Almost every configuration parameter has a default. Only one command-line flag is required of the user to get things up and running. The user is expected to have a [huggingface user access token](https://huggingface.co/docs/hub/security-tokens) that will be used to upload the model post-finetuning to the HuggingFace Hub. Run the following bash command to get things running:

```bash
python alignprop.py --hf_user_access_token <token>
```

To obtain the documentation of `alignprop.py`, please run `python alignprop.py --help`

The following are things to keep in mind in general while configuring the trainer, beyond the example script's use case (the code checks these for you as well):

- The configurable randomized truncation range (`--alignprop_config.truncated_rand_backprop_minmax=(0,50)`): the first number should be greater than or equal to 0, while the second number should be less than or equal to the number of diffusion timesteps (`sample_num_steps`).
- The configurable truncation backprop absolute step (`--alignprop_config.truncated_backprop_timestep=49`): the number should be less than the number of diffusion timesteps (`sample_num_steps`). It only matters when `truncated_backprop_rand` is set to `False`.

## Setting up the image logging hook function

Expect the function to be given a dictionary with keys
```python
['image', 'prompt', 'prompt_metadata', 'rewards']
```
and `image`, `prompt`, `prompt_metadata`, `rewards` are batched.
You are free to log however you want; the use of `wandb` or `tensorboard` is recommended.

### Key terms

- `rewards` : The reward/score is a numerical value associated with the generated image and is key to steering the RL process
- `prompt` : The prompt is the text that is used to generate the image
- `prompt_metadata` : The prompt metadata is the metadata associated with the prompt. A situation where this will not be empty is when the reward model comprises a [`FLAVA`](https://huggingface.co/docs/transformers/model_doc/flava) setup where questions and ground-truth answers (linked to the generated image) are expected with the generated image (See here: https://github.com/kvablack/ddpo-pytorch/blob/main/ddpo_pytorch/rewards.py#L45)
- `image` : The image generated by the Stable Diffusion model

Example code for logging sampled images with `wandb` is given below.
```python
# for logging these images to wandb

def image_outputs_hook(image_data, global_step, accelerate_logger):
    # For the sake of this example, we only care about the last batch,
    # hence we extract the last element of the list
    result = {}
    images, prompts, rewards = image_data["images"], image_data["prompts"], image_data["rewards"]
    for i, image in enumerate(images):
        pil = Image.fromarray(
            (image.cpu().numpy().transpose(1, 2, 0) * 255).astype(np.uint8)
        )
        pil = pil.resize((256, 256))
        result[f"{prompts[i]:.25} | {rewards[i]:.2f}"] = [pil]
    accelerate_logger.log_images(
        result,
        step=global_step,
    )
```

### Using the finetuned model

Assuming you're done with all the epochs and have pushed your model up to the Hub, you can use the finetuned model as follows:

```python
from diffusers import StableDiffusionPipeline
pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipeline.to("cuda")

pipeline.load_lora_weights('mihirpd/alignprop-trl-aesthetics')

prompts = ["squirrel", "crab", "starfish", "whale", "sponge", "plankton"]
results = pipeline(prompts)

for prompt, image in zip(prompts, results.images):
    image.save(f"dump/{prompt}.png")
```

## Credits

This work is heavily influenced by the repo [here](https://github.com/mihirp1998/AlignProp/) and the associated paper [Aligning Text-to-Image Diffusion Models with Reward Backpropagation by Mihir Prabhudesai, Anirudh Goyal, Deepak Pathak, Katerina Fragkiadaki](https://huggingface.co/papers/2310.03739).
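## Appendix: a minimal wiring sketch

For orientation, here is a sketch of how the pieces discussed above (config, prompt function, differentiable reward, logging hook) could be wired together. This is not the canonical example script: it assumes the `AlignPropTrainer` constructor mirrors `DDPOTrainer` (config, reward function, prompt function, pipeline, optional image hook), that the reward function returns a `(rewards, metadata)` pair as TRL's aesthetic scorer does, and that `DefaultDDPOStableDiffusionPipeline` is the exported pipeline wrapper; `my_prompt_fn` and `my_reward_fn` are hypothetical stand-ins.

```python
from trl import AlignPropConfig, AlignPropTrainer, DefaultDDPOStableDiffusionPipeline


def my_prompt_fn():
    # Hypothetical prompt function: one prompt plus (optional) metadata per call.
    return "a photo of a squirrel", {}


def my_reward_fn(images, prompts, metadata):
    # Hypothetical differentiable reward: brighter images score higher.
    # Swap in a real reward model (e.g. an aesthetic scorer) in practice.
    return images.mean(dim=(1, 2, 3)), {}


config = AlignPropConfig(num_epochs=1)  # other fields assumed to keep their defaults
pipeline = DefaultDDPOStableDiffusionPipeline("runwayml/stable-diffusion-v1-5")

trainer = AlignPropTrainer(
    config,
    my_reward_fn,
    my_prompt_fn,
    pipeline,
    image_samples_hook=image_outputs_hook,  # the wandb hook defined above
)
trainer.train()
```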
trl/docs/source/alignprop_trainer.mdx/0
{ "file_path": "trl/docs/source/alignprop_trainer.mdx", "repo_id": "trl", "token_count": 1505 }
485
# KTO Trainer

TRL supports the Kahneman-Tversky Optimization (KTO) Trainer for aligning language models with binary feedback data (e.g., upvote/downvote), as described in the [paper](https://huggingface.co/papers/2402.01306) by Kawin Ethayarajh, Winnie Xu, Niklas Muennighoff, Dan Jurafsky, and Douwe Kiela.
For a full example have a look at `examples/scripts/kto.py`.

Depending on how good your base model is, you may or may not need to do SFT before KTO.
This is different from standard RLHF and DPO, which always require SFT.

## Expected dataset format

The KTO trainer expects a very specific format for the dataset as it does not require pairwise preferences. Since the model will be trained to directly optimize examples that consist of a prompt, model completion, and a label to indicate whether the completion is "good" or "bad", we expect a dataset with the following columns:

- `prompt`
- `completion`
- `label`

for example:

```py
kto_dataset_dict = {
    "prompt": [
        "Hey, hello",
        "How are you",
        "What is your name?",
        "What is your name?",
        "Which is the best programming language?",
        "Which is the best programming language?",
        "Which is the best programming language?",
    ],
    "completion": [
        "hi nice to meet you",
        "leave me alone",
        "I don't have a name",
        "My name is Mary",
        "Python",
        "C++",
        "Java",
    ],
    "label": [
        True,
        False,
        False,
        True,
        True,
        False,
        False,
    ],
}
```

where the `prompt` contains the context inputs, `completion` contains the corresponding responses, and `label` contains the corresponding flag that indicates if the generated completion is desired (`True`) or undesired (`False`).
A prompt can have multiple responses, and this is reflected in the entries being repeated in the dictionary's value arrays. It is required that the dataset contains at least one desirable and one undesirable completion.

## Expected model format

The KTO trainer expects a model of `AutoModelForCausalLM`, whereas PPO expects `AutoModelForCausalLMWithValueHead` for the value function.

## Using the `KTOTrainer`

For a detailed example have a look at the `examples/scripts/kto.py` script. At a high level, we need to initialize the `KTOTrainer` with a `model` we wish to train and a reference `ref_model` which we will use to calculate the implicit rewards of the desirable and undesirable completions.

The `beta` refers to the hyperparameter of the implicit reward, and the dataset contains the 3 entries listed above. Note that the `model` and `ref_model` need to have the same architecture (i.e., decoder-only or encoder-decoder).

The `desirable_weight` and `undesirable_weight` refer to the weights placed on the losses for desirable/positive and undesirable/negative examples.
By default, they are both 1. However, if you have more of one or the other, then you should upweight the less common type such that the ratio of (`desirable_weight` * number of positives) to (`undesirable_weight` * number of negatives) is in the range 1:1 to 4:3.

```py
training_args = KTOConfig(
    beta=0.1,
    desirable_weight=1.0,
    undesirable_weight=1.0,
)

kto_trainer = KTOTrainer(
    model,
    ref_model,
    args=training_args,
    train_dataset=train_dataset,
    tokenizer=tokenizer,
)
```
After this one can then call:

```py
kto_trainer.train()
```

### For Mixture of Experts Models: Enabling the auxiliary loss

MOEs are most efficient if the load is about equally distributed between experts.
To ensure that we train MOEs similarly during preference-tuning, it is beneficial to add the auxiliary loss from the load balancer to the final loss.
This option is enabled by setting `output_router_logits=True` in the model config (e.g. `MixtralConfig`).
To scale how much the auxiliary loss contributes to the total loss, use the hyperparameter `router_aux_loss_coef=...` (default: `0.001`).

## KTOTrainer

[[autodoc]] KTOTrainer

## KTOConfig

[[autodoc]] KTOConfig
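## Appendix: materializing the toy dataset

For completeness, the toy `kto_dataset_dict` shown above can be turned into a `datasets.Dataset` and sanity-checked before being handed to the trainer. This is a small sketch using the standard `datasets` API; the printed counts help you pick `desirable_weight`/`undesirable_weight` when the labels are imbalanced.

```py
from datasets import Dataset

# Build a `datasets.Dataset` from the toy dictionary defined earlier on this page.
train_dataset = Dataset.from_dict(kto_dataset_dict)

# The trainer expects exactly these three columns.
assert set(train_dataset.column_names) == {"prompt", "completion", "label"}

# Check the label balance; if it is skewed, upweight the rarer side so that
# desirable_weight * num_desirable : undesirable_weight * num_undesirable stays near 1:1.
num_desirable = sum(train_dataset["label"])
num_undesirable = len(train_dataset) - num_desirable
print(f"desirable: {num_desirable}, undesirable: {num_undesirable}")
```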
trl/docs/source/kto_trainer.mdx/0
{ "file_path": "trl/docs/source/kto_trainer.mdx", "repo_id": "trl", "token_count": 1292 }
486
# Trainer

At TRL we support PPO (Proximal Policy Optimization) with an implementation that largely follows the structure introduced in the paper "Fine-Tuning Language Models from Human Preferences" by D. Ziegler et al. [[paper](https://huggingface.co/papers/1909.08593), [code](https://github.com/openai/lm-human-preferences)].
The Trainer and model classes are largely inspired by the `transformers.Trainer` and `transformers.AutoModel` classes, adapted for RL.
We also support a `RewardTrainer` that can be used to train a reward model.

## CPOConfig

[[autodoc]] CPOConfig

## CPOTrainer

[[autodoc]] CPOTrainer

## DDPOConfig

[[autodoc]] DDPOConfig

## DDPOTrainer

[[autodoc]] DDPOTrainer

## DPOTrainer

[[autodoc]] DPOTrainer

## IterativeSFTTrainer

[[autodoc]] IterativeSFTTrainer

## KTOConfig

[[autodoc]] KTOConfig

## KTOTrainer

[[autodoc]] KTOTrainer

## ORPOConfig

[[autodoc]] ORPOConfig

## ORPOTrainer

[[autodoc]] ORPOTrainer

## PPOConfig

[[autodoc]] PPOConfig

## PPOTrainer

[[autodoc]] PPOTrainer

## RewardConfig

[[autodoc]] RewardConfig

## RewardTrainer

[[autodoc]] RewardTrainer

## SFTTrainer

[[autodoc]] SFTTrainer

## set_seed

[[autodoc]] set_seed
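## Quick sketch

Because this page is only an API index, the following compressed sketch shows how the PPO pieces fit together; it mirrors the pattern used in `examples/scripts/ppo.py`. The model, reference model, tokenizer, dataset, and collator are assumed to exist, and `reward_for` is a hypothetical scoring helper.

```py
import torch
from trl import PPOConfig, PPOTrainer

config = PPOConfig(model_name="gpt2", learning_rate=1.41e-5)
ppo_trainer = PPOTrainer(config, model, ref_model, tokenizer, dataset=dataset, data_collator=collator)

generation_kwargs = {"do_sample": True, "pad_token_id": tokenizer.eos_token_id, "max_new_tokens": 32}
for batch in ppo_trainer.dataloader:
    query_tensors = batch["input_ids"]
    # Sample responses from the current policy.
    response_tensors = ppo_trainer.generate(query_tensors, return_prompt=False, **generation_kwargs)
    batch["response"] = tokenizer.batch_decode(response_tensors)
    # Score each (query, response) pair; one scalar reward tensor per sample.
    rewards = [torch.tensor(reward_for(q + r)) for q, r in zip(batch["query"], batch["response"])]
    # One PPO optimization step, then log.
    stats = ppo_trainer.step(query_tensors, response_tensors, rewards)
    ppo_trainer.log_stats(stats, batch, rewards)
```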
trl/docs/source/trainer.mdx/0
{ "file_path": "trl/docs/source/trainer.mdx", "repo_id": "trl", "token_count": 425 }
487
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from peft import LoraConfig from transformers import AutoTokenizer, HfArgumentParser, load_tool from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer, TextEnvironment os.environ["HF_ALLOW_CODE_EVAL"] = "1" os.environ["TOKENIZERS_PARALLELISM"] = "false" @dataclass class ScriptArguments: model_name: Optional[str] = field(default="bigcode/starcoderbase", metadata={"help": "the model name"}) log_with: Optional[str] = field(default=None, metadata={"help": "use 'wandb' to log with wandb"}) learning_rate: Optional[float] = field(default=1e-5, metadata={"help": "the learning rate"}) mini_batch_size: Optional[int] = field(default=1, metadata={"help": "the PPO minibatch size"}) batch_size: Optional[int] = field(default=32, metadata={"help": "the batch size"}) gradient_accumulation_steps: Optional[int] = field( default=16, metadata={"help": "the number of gradient accumulation steps"} ) max_new_tokens: Optional[int] = field(default=256, metadata={"help": "max number of generated tokens per turn"}) ppo_epochs: Optional[int] = field(default=1, metadata={"help": "max number of ppo epochs"}) iterations: Optional[int] = field(default=1000, metadata={"help": "the number of iterations"}) seed: Optional[int] = field(default=0, metadata={"help": "the random seed"}) parser = HfArgumentParser(ScriptArguments) args = parser.parse_args_into_dataclasses()[0] lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", target_modules=["c_proj", "c_attn", "q_attn"], ) # set up models model = AutoModelForCausalLMWithValueHead.from_pretrained( args.model_name, use_auth_token=True, trust_remote_code=True, load_in_4bit=True, peft_config=lora_config, ) tokenizer = AutoTokenizer.from_pretrained(args.model_name, use_auth_token=True) tokenizer.pad_token = tokenizer.eos_token # system prompt prompt = """\ Answer the following question: Q: In which branch of the arts is Patricia Neary famous? A: Ballets A2: <request><Wiki>Patricia Neary<call>Patricia Neary (born October 27, 1942) is an American ballerina, choreographer and ballet director, who has been particularly active in Switzerland. She has also been a highly successful ambassador for the Balanchine Trust, bringing George Balanchine's ballets to 60 cities around the globe.<response> Result=Ballets<submit> Q: Who won Super Bowl XX? A: Chicago Bears A2: <request><Wiki>Super Bowl XX<call>Super Bowl XX was an American football game between the National Football Conference (NFC) champion Chicago Bears and the American Football Conference (AFC) champion New England Patriots to decide the National Football League (NFL) champion for the 1985 season. 
The Bears defeated the Patriots by the score of 46–10, capturing their first NFL championship (and Chicago's first overall sports victory) since 1963, three years prior to the birth of the Super Bowl. Super Bowl XX was played on January 26, 1986 at the Louisiana Superdome in New Orleans.<response> Result=Chicago Bears<submit> Q: """ generation_kwargs = { "min_length": -1, "top_k": 0.0, "top_p": 1.0, "do_sample": True, "pad_token_id": tokenizer.eos_token_id, "eos_token_id": -1, "max_new_tokens": args.max_new_tokens, } # trainer config = PPOConfig( batch_size=args.batch_size, model_name=args.model_name, learning_rate=args.learning_rate, log_with=args.log_with, mini_batch_size=args.mini_batch_size, ppo_epochs=args.ppo_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, seed=args.seed, optimize_cuda_cache=True, ) ppo_trainer = PPOTrainer(config=config, model=model, tokenizer=tokenizer) dataset = load_dataset("trivia_qa", "rc", split="train") local_seed = args.seed + ppo_trainer.accelerator.process_index * 100003 # Prime dataset = dataset.shuffle(local_seed) def data_generator(): for i in range(len(dataset)): yield dataset[i]["question"], list(dataset[i]["answer"]["normalized_aliases"]) gen = data_generator() gen = iter(gen) def generate_data(n): tasks, answers = [], [] for _i in range(n): q, a = next(gen) tasks.append(q) answers.append(a) return tasks, answers def exact_match_reward(responses, answers=None): """Reward if generated response contains correct answer.""" rewards = [] for response, answer in zip(responses, answers): reward = 0.0 for a in answer: if a.lower() in response.lower(): reward += 1.0 break rewards.append(torch.tensor(reward)) return rewards def tool_fn(x): # limit the amount of tokens return tool(x).split("\n")[1][:600] # text env tool = load_tool("vwxyzjn/pyserini-wikipedia-kilt-doc") text_env = TextEnvironment( model, tokenizer, {"Wiki": tool_fn}, exact_match_reward, prompt, generation_kwargs=generation_kwargs, max_tool_reponse=400, ) def print_trainable_parameters(model): trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" ) print_trainable_parameters(model) # main training loop for i in range(args.iterations): tasks, answers = generate_data(config.batch_size) queries, responses, masks, rewards, histories = text_env.run(tasks, answers=answers) train_stats = ppo_trainer.step(queries, responses, rewards, masks) response_texts = [tokenizer.decode(response) for response in responses] query_texts = [tokenizer.decode(query) for query in queries] texts = { "query": [qt.split("<submit>")[-1].strip() for qt in query_texts], "response": response_texts, "answer": [", ".join(item) for item in answers], } all_rewards = ppo_trainer.accelerator.gather(torch.tensor(rewards, device=ppo_trainer.accelerator.device)) ppo_trainer.log_stats(train_stats, texts, list(all_rewards), columns_to_log=["query", "response", "answer"]) if i % 100 == 0: ppo_trainer.save_pretrained(f"models/{args.model_name}_{args.seed}_{i}_triviaqa")
trl/examples/research_projects/tools/triviaqa.py/0
{ "file_path": "trl/examples/research_projects/tools/triviaqa.py", "repo_id": "trl", "token_count": 2555 }
488
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ python examples/scripts/ppo.py \ --log_with=wandb """ from dataclasses import dataclass, field from typing import Optional import torch from accelerate import Accelerator, PartialState from datasets import load_dataset from peft import LoraConfig from tqdm import tqdm from transformers import AutoTokenizer, HfArgumentParser, pipeline from trl import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead, PPOConfig, PPOTrainer, set_seed from trl.core import LengthSampler from trl.import_utils import is_npu_available, is_xpu_available tqdm.pandas() @dataclass class ScriptArguments: use_seq2seq: bool = field(default=False, metadata={"help": "whether to use seq2seq"}) trust_remote_code: bool = field(default=False, metadata={"help": "Enable `trust_remote_code`"}) # LoraConfig use_peft: bool = field(default=False, metadata={"help": "whether to use peft"}) lora_alpha: Optional[float] = field(default=16, metadata={"help": "the lora alpha parameter"}) lora_r: Optional[int] = field(default=16, metadata={"help": "the lora r parameter"}) parser = HfArgumentParser((ScriptArguments, PPOConfig)) args, ppo_config = parser.parse_args_into_dataclasses() # We then define the arguments to pass to the sentiment analysis pipeline. # We set `return_all_scores` to True to get the sentiment score for each token. sent_kwargs = {"return_all_scores": True, "function_to_apply": "none", "batch_size": 16} trl_model_class = AutoModelForCausalLMWithValueHead if not args.use_seq2seq else AutoModelForSeq2SeqLMWithValueHead tokenizer = AutoTokenizer.from_pretrained(ppo_config.model_name) tokenizer.pad_token = tokenizer.eos_token # Below is an example function to build the dataset. In our case, we use the IMDB dataset # from the `datasets` library. One should customize this function to train the model on # its own dataset. def build_dataset(query_dataset, dataset_num_proc, input_min_text_length=2, input_max_text_length=8): """ Build dataset for training. This builds the dataset from `load_dataset`, one should customize this function to train the model on its own dataset. Args: query_dataset (`str`): The name of the dataset to be loaded. Returns: dataloader (`torch.utils.data.DataLoader`): The dataloader for the dataset. """ # load imdb with datasets ds = load_dataset(query_dataset, split="train") ds = ds.rename_columns({"text": "review"}) ds = ds.filter(lambda x: len(x["review"]) > 200, num_proc=dataset_num_proc) input_size = LengthSampler(input_min_text_length, input_max_text_length) def tokenize(sample): sample["input_ids"] = tokenizer.encode(sample["review"])[: input_size()] sample["query"] = tokenizer.decode(sample["input_ids"]) return sample ds = ds.map(tokenize, num_proc=dataset_num_proc) ds.set_format(type="torch") return ds # We retrieve the dataloader by calling the `build_dataset` function. # Compute that only on the main process for faster data processing. 
# see: https://github.com/huggingface/trl/pull/1255 with PartialState().local_main_process_first(): dataset = build_dataset(ppo_config.query_dataset, ppo_config.dataset_num_proc) def collator(data): return {key: [d[key] for d in data] for key in data[0]} # set seed before initializing value head for deterministic eval set_seed(ppo_config.seed) # Now let's build the model, the reference model, and the tokenizer. if not args.use_peft: ref_model = trl_model_class.from_pretrained(ppo_config.model_name, trust_remote_code=args.trust_remote_code) device_map = None peft_config = None else: peft_config = LoraConfig( r=args.lora_r, lora_alpha=args.lora_alpha, bias="none", task_type="CAUSAL_LM", ) ref_model = None # Copy the model to each device device_map = {"": Accelerator().local_process_index} model = trl_model_class.from_pretrained( ppo_config.model_name, trust_remote_code=args.trust_remote_code, device_map=device_map, peft_config=peft_config, ) tokenizer = AutoTokenizer.from_pretrained(ppo_config.model_name) # Some tokenizers like GPT-2's don't have a padding token by default, so we set one here. tokenizer.pad_token_id = tokenizer.eos_token_id # We then build the PPOTrainer, passing the model, the reference model, the tokenizer ppo_trainer = PPOTrainer(ppo_config, model, ref_model, tokenizer, dataset=dataset, data_collator=collator) # We then build the sentiment analysis pipeline, passing the model name and the # sentiment analysis pipeline arguments. Let's also make sure to set the device # to the same device as the PPOTrainer. device = ppo_trainer.accelerator.device if ppo_trainer.accelerator.num_processes == 1: if is_xpu_available(): device = "xpu:0" elif is_npu_available(): device = "npu:0" else: device = 0 if torch.cuda.is_available() else "cpu" # to avoid a `pipeline` bug ds_plugin = ppo_trainer.accelerator.state.deepspeed_plugin task, model_name = ppo_config.reward_model.split(":") if ds_plugin is not None and ds_plugin.is_zero3_init_enabled(): with ds_plugin.zero3_init_context_manager(enable=False): sentiment_pipe = pipeline(task, model=model_name, device=device) else: sentiment_pipe = pipeline(task, model=model_name, device=device) # Some tokenizers like GPT-2's don't have a padding token by default, so we set one here. if sentiment_pipe.tokenizer.pad_token_id is None: sentiment_pipe.tokenizer.pad_token_id = tokenizer.pad_token_id if sentiment_pipe.model.config.pad_token_id is None: sentiment_pipe.model.config.pad_token_id = tokenizer.pad_token_id # We then define the arguments to pass to the `generate` function. These arguments # are passed to the `generate` function of the PPOTrainer, which is a wrapper around # the `generate` function of the trained model. 
generation_kwargs = { "min_length": -1, "top_k": 0.0, "top_p": 1.0, "do_sample": True, "pad_token_id": tokenizer.eos_token_id, "max_new_tokens": 32, } for batch in tqdm(ppo_trainer.dataloader): query_tensors = batch["input_ids"] # Get response from gpt2 response_tensors, ref_response_tensors = ppo_trainer.generate( query_tensors, return_prompt=False, generate_ref_response=True, **generation_kwargs ) batch["response"] = tokenizer.batch_decode(response_tensors) batch["ref_response"] = tokenizer.batch_decode(ref_response_tensors) # Compute sentiment score texts = [q + r for q, r in zip(batch["query"], batch["response"])] pipe_outputs = sentiment_pipe(texts, **sent_kwargs) rewards = [torch.tensor(output[1]["score"]) for output in pipe_outputs] ref_texts = [q + r for q, r in zip(batch["query"], batch["ref_response"])] ref_pipe_outputs = sentiment_pipe(ref_texts, **sent_kwargs) ref_rewards = [torch.tensor(output[1]["score"]) for output in ref_pipe_outputs] batch["ref_rewards"] = ref_rewards # Run PPO step stats = ppo_trainer.step(query_tensors, response_tensors, rewards) ppo_trainer.log_stats(stats, batch, rewards, columns_to_log=["query", "response", "ref_response", "ref_rewards"])
trl/examples/scripts/ppo.py/0
{ "file_path": "trl/examples/scripts/ppo.py", "repo_id": "trl", "token_count": 2796 }
489
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from unittest.mock import patch import torch from transformers import AutoTokenizer from trl import AutoModelForCausalLMWithValueHead, TextEnvironment, TextHistory class DummyTool: def __call__(self, text): return text def dummy_generate(histories): for i in range(len(histories)): histories[i].append_segment("<request><DummyTool>test<call>", torch.tensor([1, 2, 3]), system=False) return histories class TextHistoryTest(unittest.TestCase): def test_text_history_init(self): text = "Hello there!" tokens = torch.tensor([1, 2, 3]) history = TextHistory(text, tokens) assert history.text == text assert torch.equal(history.tokens, tokens) assert torch.equal(history.token_masks, torch.zeros_like(tokens)) history = TextHistory(text, tokens, system=False) assert torch.equal(history.token_masks, torch.ones_like(tokens)) def test_text_history_append_segment(self): text = "Hello there!" tokens = torch.tensor([1, 2, 3]) history = TextHistory(text, tokens) history.append_segment("General Kenobi!", torch.tensor([4, 5, 6]), system=False) assert history.text == (text + "General Kenobi!") assert torch.equal(history.tokens, torch.tensor([1, 2, 3, 4, 5, 6])) assert torch.equal(history.token_masks, torch.tensor([0, 0, 0, 1, 1, 1])) history.append_segment("You are a bold one!", torch.tensor([7, 8, 9])) assert history.text == ((text + "General Kenobi!") + "You are a bold one!") assert torch.equal(history.tokens, torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9])) assert torch.equal(history.token_masks, torch.tensor([0, 0, 0, 1, 1, 1, 0, 0, 0])) def test_text_history_complete(self): text = "Hello there!" tokens = torch.tensor([1, 2, 3]) history = TextHistory(text, tokens) history.complete() assert history.completed assert not history.truncated history.complete(truncated=True) assert history.completed assert history.truncated def test_text_history_last_segment(self): text = "Hello there!" tokens = torch.tensor([1, 2, 3]) history = TextHistory(text, tokens) history.append_segment("General Kenobi!", torch.tensor([4, 5, 6])) history.append_segment("You are a bold one!", torch.tensor([7, 8, 9])) assert history.last_text_segment == "You are a bold one!" def test_text_history_split_query_response(self): text = "Hello there!" 
tokens = torch.tensor([1, 2, 3]) history = TextHistory(text, tokens) history.append_segment("General Kenobi!", torch.tensor([4, 5, 6]), system=False) history.append_segment("You are a bold one!", torch.tensor([7, 8, 9]), system=True) query, response, mask = history.split_query_response_tokens() assert torch.equal(query, torch.tensor([1, 2, 3])) assert torch.equal(response, torch.tensor([4, 5, 6, 7, 8, 9])) assert torch.equal(mask, torch.tensor([1, 1, 1, 0, 0, 0])) class TextEnvironmentTester(unittest.TestCase): def setUp(self): # model_id self.model_id = "trl-internal-testing/dummy-GPT2-correct-vocab" # get models and tokenizer self.gpt2_model = AutoModelForCausalLMWithValueHead.from_pretrained(self.model_id) self.gpt2_tokenizer = AutoTokenizer.from_pretrained(self.model_id) self.gpt2_tokenizer.pad_token = self.gpt2_tokenizer.eos_token def test_text_environment_setup(self): env = TextEnvironment( self.gpt2_model, self.gpt2_tokenizer, tools=[DummyTool()], reward_fn=lambda x: torch.tensor(1), prompt="I am a prompt!\n", ) assert env.prompt == "I am a prompt!\n" assert list(env.tools.keys()) == ["DummyTool"] assert isinstance(env.tools["DummyTool"], DummyTool) assert env.reward_fn("Hello there!") == 1 def test_text_environment_generate(self): generation_kwargs = {"do_sample": False, "max_new_tokens": 4, "pad_token_id": self.gpt2_tokenizer.eos_token_id} env = TextEnvironment( self.gpt2_model, self.gpt2_tokenizer, tools=[DummyTool()], reward_fn=lambda x: torch.tensor(1), prompt="I am a prompt!\n", generation_kwargs=generation_kwargs, ) input_texts = ["this is a test", "this is another, longer test"] model_inputs = [self.gpt2_tokenizer(txt, return_tensors="pt").input_ids.squeeze() for txt in input_texts] generations_batched = env._generate_batched(model_inputs, batch_size=2) generations_batched = self.gpt2_tokenizer.batch_decode(generations_batched) generations_single = [env._generate_batched([inputs], batch_size=1)[0] for inputs in model_inputs] generations_single = self.gpt2_tokenizer.batch_decode(generations_single) assert generations_single == generations_batched def test_text_environment_tool_call_parsing(self): string_valid = "Something something <request><Tool1>Hello there!<call>" string_invalid_request = "Something something <Tool1>Hello there!<call>" string_invalid_call = "Something something <request><Tool1>Hello there!" string_invalid_tool = "Something something <request>|Tool2|Hello there!<call>" string_invalid_random = "<>abcdefghijklm<>nopqrstuvwxyz<>" env = TextEnvironment( self.gpt2_model, self.gpt2_tokenizer, tools=[DummyTool()], reward_fn=lambda x: torch.tensor(1), prompt="I am a prompt!\n", ) tool, response = env.parse_tool_call(string_valid) assert tool == "Tool1" assert response == "Hello there!" 
tool, response = env.parse_tool_call(string_invalid_request) assert tool is None assert response is None tool, response = env.parse_tool_call(string_invalid_call) assert tool is None assert response is None tool, response = env.parse_tool_call(string_invalid_tool) assert tool is None assert response is None tool, response = env.parse_tool_call(string_invalid_random) assert tool is None assert response is None def test_text_environment_tool_truncation(self): env = TextEnvironment( self.gpt2_model, self.gpt2_tokenizer, tools={"dummy": lambda x: "a" * 1000}, reward_fn=lambda x: torch.tensor(1), prompt="I am a prompt!\n", ) env.max_tool_response = 100 history = env.step(TextHistory("<request><dummy>Hello there!<call>", torch.tensor([1, 2, 3]))) assert (len(history.last_text_segment) - len(env.response_token)) == 100 env.max_tool_response = 500 history = env.step(TextHistory("<request><dummy>Hello there!<call>", torch.tensor([1, 2, 3]))) assert (len(history.last_text_segment) - len(env.response_token)) == 500 env.max_tool_response = 1001 history = env.step(TextHistory("<request><dummy>Hello there!<call>", torch.tensor([1, 2, 3]))) assert (len(history.last_text_segment) - len(env.response_token)) == 1000 env.max_tool_response = 2000 history = env.step(TextHistory("<request><dummy>Hello there!<call>", torch.tensor([1, 2, 3]))) assert (len(history.last_text_segment) - len(env.response_token)) == 1000 @patch.object(TextEnvironment, "generate", side_effect=dummy_generate) def test_text_environment_max_calls(self, mock_generate): env = TextEnvironment( self.gpt2_model, self.gpt2_tokenizer, tools={"DummyTool": DummyTool()}, reward_fn=lambda x: [torch.tensor(1) for _ in x], prompt="I am a prompt!\n", ) env.max_turns = 1 _, _, _, _, histories = env.run(["test"]) assert histories[0].text == ( ("I am a prompt!\n" + "test") + (1 * "<request><DummyTool>test<call>test<response>") ) env.max_turns = 2 _, _, _, _, histories = env.run(["test"]) assert histories[0].text == ( ("I am a prompt!\n" + "test") + (2 * "<request><DummyTool>test<call>test<response>") ) env.max_turns = 4 _, _, _, _, histories = env.run(["test"]) assert histories[0].text == ( ("I am a prompt!\n" + "test") + (4 * "<request><DummyTool>test<call>test<response>") ) def test_text_environment_compute_rewards(self): env = TextEnvironment( self.gpt2_model, self.gpt2_tokenizer, tools={"DummyTool": DummyTool()}, reward_fn=lambda x: [torch.tensor(i) for i, _ in enumerate(x)], prompt="I am a prompt!\n", ) histories = [TextHistory("<request><DummyTool>test<call>", torch.tensor([1, 2, 3])) for _ in range(8)] histories = env.compute_reward(histories) for i in range(8): assert histories[i].reward == i @patch.object(TextEnvironment, "generate", side_effect=dummy_generate) def test_text_environment_run(self, mock_generate): env = TextEnvironment( self.gpt2_model, self.gpt2_tokenizer, tools={"DummyTool": DummyTool()}, reward_fn=lambda x: [torch.tensor(i) for i, _ in enumerate(x)], prompt="I am a prompt!\n", max_turns=2, ) task_1 = "Hello there!" task_2 = "Hello there! General Kenobi!" 
        query, response, response_mask, reward, histories = env.run([task_1, task_2])

        assert len(query[0]) == 9
        assert len(query[1]) == 12
        assert len(response[0]) == 14
        assert len(response[1]) == 14
        assert response_mask[0].sum() == (2 * 3)  # mocked generate always adds 3 tokens
        assert response_mask[1].sum() == (2 * 3)  # mocked generate always adds 3 tokens
        assert reward[0] == 0
        assert reward[1] == 1
        assert histories[0].text == (
            ("I am a prompt!\n" + "Hello there!") + (2 * "<request><DummyTool>test<call>test<response>")
        )
        assert histories[1].text == (
            ("I am a prompt!\n" + "Hello there! General Kenobi!") + (2 * "<request><DummyTool>test<call>test<response>")
        )
trl/tests/test_environments.py/0
{ "file_path": "trl/tests/test_environments.py", "repo_id": "trl", "token_count": 4797 }
490
import unittest import torch from trl import is_peft_available from trl.trainer.model_config import ModelConfig from trl.trainer.utils import get_peft_config, pad if is_peft_available(): from peft import LoraConfig from .testing_utils import require_peft class TestPad(unittest.TestCase): def test_pad_1_dim_left(self): x = torch.tensor([1, 2, 3]) y = torch.tensor([4, 5]) output = pad((x, y), padding_value=0, padding_side="left") expected = torch.tensor([[1, 2, 3], [0, 4, 5]]) self.assertTrue(torch.equal(output, expected)) def test_pad_1_dim_right(self): x = torch.tensor([1, 2, 3]) y = torch.tensor([4, 5]) output = pad((x, y), padding_value=0, padding_side="right") expected = torch.tensor([[1, 2, 3], [4, 5, 0]]) self.assertTrue(torch.equal(output, expected)) def test_pad_2_dim_left(self): x = torch.tensor([[1, 2], [3, 4]]) y = torch.tensor([[5, 6]]) output = pad((x, y), padding_value=0, padding_side="left") expected = torch.tensor( [ [[1, 2], [3, 4]], [[0, 0], [5, 6]], ] ) self.assertTrue(torch.equal(output, expected)) def test_pad_2_dim_right(self): x = torch.tensor([[1, 2], [3, 4]]) y = torch.tensor([[5, 6]]) output = pad((x, y), padding_value=0, padding_side="right") expected = torch.tensor( [ [[1, 2], [3, 4]], [[5, 6], [0, 0]], ] ) self.assertTrue(torch.equal(output, expected)) def test_pad_2_dim_right_multidim(self): x = torch.tensor([[1, 2], [3, 4]]) y = torch.tensor([[5]]) output = pad((x, y), padding_value=0, padding_side="right") expected = torch.tensor( [ [[1, 2], [3, 4]], [[5, 0], [0, 0]], ] ) self.assertTrue(torch.equal(output, expected)) @require_peft class TestGetPEFTConfig(unittest.TestCase): def test_create_peft_config_use_peft_false(self): """Test that when use_peft is False, the function returns None.""" model_config = ModelConfig(use_peft=False) peft_config = get_peft_config(model_config) self.assertIsNone(peft_config) def test_create_peft_config_use_peft_true(self): """Test that when use_peft is True, the function returns a LoraConfig object.""" # Provide non-default values to the model config for testing peft_kwargs = { "lora_r": 8, "lora_alpha": 16, "lora_dropout": 0.1, "lora_task_type": "SEQ_CLS", "use_rslora": True, "lora_target_modules": ["up_proj", "down_proj"], "lora_modules_to_save": ["up_proj"], } model_config = ModelConfig(use_peft=True, **peft_kwargs) peft_config = get_peft_config(model_config) self.assertTrue(isinstance(peft_config, LoraConfig)) for arg, value in peft_kwargs.items(): # Test that lists of modules are converted to sets if arg == "lora_target_modules": value = set(value) # Rename the argument to match the LoraConfig attribute name if arg in ["lora_r", "lora_task_type", "lora_target_modules", "lora_modules_to_save"]: arg = arg[len("lora_") :] if arg.startswith("lora_") else arg self.assertEqual(getattr(peft_config, arg), value)
trl/tests/test_utils.py/0
{ "file_path": "trl/tests/test_utils.py", "repo_id": "trl", "token_count": 1741 }
491
# Copyright 2022 The HuggingFace Team. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import torch import torch.nn as nn import torchvision from huggingface_hub import hf_hub_download from huggingface_hub.utils import EntryNotFoundError from transformers import CLIPModel from trl.import_utils import is_npu_available, is_xpu_available class MLP(nn.Module): def __init__(self): super().__init__() self.layers = nn.Sequential( nn.Linear(768, 1024), nn.Dropout(0.2), nn.Linear(1024, 128), nn.Dropout(0.2), nn.Linear(128, 64), nn.Dropout(0.1), nn.Linear(64, 16), nn.Linear(16, 1), ) def forward(self, embed): return self.layers(embed) class AestheticScorer(torch.nn.Module): """ This model attempts to predict the aesthetic score of an image. The aesthetic score is a numerical approximation of how much a specific image is liked by humans on average. This is from https://github.com/christophschuhmann/improved-aesthetic-predictor """ def __init__(self, *, dtype, model_id, model_filename): super().__init__() self.clip = CLIPModel.from_pretrained("openai/clip-vit-large-patch14") self.normalize = torchvision.transforms.Normalize( mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711] ) self.target_size = 224 self.mlp = MLP() try: cached_path = hf_hub_download(model_id, model_filename) except EntryNotFoundError: cached_path = os.path.join(model_id, model_filename) state_dict = torch.load(cached_path, map_location=torch.device("cpu"), weights_only=True) self.mlp.load_state_dict(state_dict) self.dtype = dtype self.eval() def __call__(self, images): device = next(self.parameters()).device images = torchvision.transforms.Resize(self.target_size)(images) images = self.normalize(images).to(self.dtype).to(device) embed = self.clip.get_image_features(pixel_values=images) # normalize embedding embed = embed / torch.linalg.vector_norm(embed, dim=-1, keepdim=True) reward = self.mlp(embed).squeeze(1) return reward def aesthetic_scorer(hub_model_id, model_filename): scorer = AestheticScorer( model_id=hub_model_id, model_filename=model_filename, dtype=torch.float32, ) if is_npu_available(): scorer = scorer.npu() elif is_xpu_available(): scorer = scorer.xpu() else: scorer = scorer.cuda() def _fn(images, prompts, metadata): images = (images).clamp(0, 1) scores = scorer(images) return scores, {} return _fn
trl/trl/models/auxiliary_modules.py/0
{ "file_path": "trl/trl/models/auxiliary_modules.py", "repo_id": "trl", "token_count": 1372 }
492
# Copyright 2023 DDPO-pytorch authors (Kevin Black), metric-space, The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from collections import defaultdict
from concurrent import futures
from typing import Any, Callable, Optional, Tuple
from warnings import warn

import torch
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from huggingface_hub import whoami

from ..models import DDPOStableDiffusionPipeline
from . import BaseTrainer, DDPOConfig
from .utils import PerPromptStatTracker


logger = get_logger(__name__)


MODEL_CARD_TEMPLATE = """---
license: apache-2.0
library_name: transformers
tags:
- trl
- ddpo
- diffusers
- reinforcement-learning
- text-to-image
- stable-diffusion
---

# {model_name}

This is a diffusion model that has been fine-tuned with reinforcement learning to guide the model outputs according to a value function or human feedback. The model can be used for image generation conditioned with text.

"""


class DDPOTrainer(BaseTrainer):
    """
    The DDPOTrainer uses Denoising Diffusion Policy Optimization to optimize diffusion models.
    Note, this trainer is heavily inspired by the work here: https://github.com/kvablack/ddpo-pytorch
    As of now, only Stable Diffusion-based pipelines are supported.

    Attributes:
        **config** (`DDPOConfig`) -- Configuration object for DDPOTrainer. Check the documentation of `DDPOConfig` for more details.
        **reward_function** (Callable[[torch.Tensor, Tuple[str], Tuple[Any]], torch.Tensor]) -- Reward function to be used
        **prompt_function** (Callable[[], Tuple[str, Any]]) -- Function to generate prompts to guide model
        **sd_pipeline** (`DDPOStableDiffusionPipeline`) -- Stable Diffusion pipeline to be used for training.
**image_samples_hook** (Optional[Callable[[Any, Any, Any], Any]]) -- Hook to be called to log images """ _tag_names = ["trl", "ddpo"] def __init__( self, config: DDPOConfig, reward_function: Callable[[torch.Tensor, Tuple[str], Tuple[Any]], torch.Tensor], prompt_function: Callable[[], Tuple[str, Any]], sd_pipeline: DDPOStableDiffusionPipeline, image_samples_hook: Optional[Callable[[Any, Any, Any], Any]] = None, ): if image_samples_hook is None: warn("No image_samples_hook provided; no images will be logged") self.prompt_fn = prompt_function self.reward_fn = reward_function self.config = config self.image_samples_callback = image_samples_hook accelerator_project_config = ProjectConfiguration(**self.config.project_kwargs) if self.config.resume_from: self.config.resume_from = os.path.normpath(os.path.expanduser(self.config.resume_from)) if "checkpoint_" not in os.path.basename(self.config.resume_from): # get the most recent checkpoint in this directory checkpoints = list( filter( lambda x: "checkpoint_" in x, os.listdir(self.config.resume_from), ) ) if len(checkpoints) == 0: raise ValueError(f"No checkpoints found in {self.config.resume_from}") checkpoint_numbers = sorted([int(x.split("_")[-1]) for x in checkpoints]) self.config.resume_from = os.path.join( self.config.resume_from, f"checkpoint_{checkpoint_numbers[-1]}", ) accelerator_project_config.iteration = checkpoint_numbers[-1] + 1 # number of timesteps within each trajectory to train on self.num_train_timesteps = int(self.config.sample_num_steps * self.config.train_timestep_fraction) self.accelerator = Accelerator( log_with=self.config.log_with, mixed_precision=self.config.mixed_precision, project_config=accelerator_project_config, # we always accumulate gradients across timesteps; we want config.train.gradient_accumulation_steps to be the # number of *samples* we accumulate across, so we need to multiply by the number of training timesteps to get # the total number of optimizer steps to accumulate across. gradient_accumulation_steps=self.config.train_gradient_accumulation_steps * self.num_train_timesteps, **self.config.accelerator_kwargs, ) is_okay, message = self._config_check() if not is_okay: raise ValueError(message) is_using_tensorboard = config.log_with is not None and config.log_with == "tensorboard" if self.accelerator.is_main_process: self.accelerator.init_trackers( self.config.tracker_project_name, config=dict(ddpo_trainer_config=config.to_dict()) if not is_using_tensorboard else config.to_dict(), init_kwargs=self.config.tracker_kwargs, ) logger.info(f"\n{config}") set_seed(self.config.seed, device_specific=True) self.sd_pipeline = sd_pipeline self.sd_pipeline.set_progress_bar_config( position=1, disable=not self.accelerator.is_local_main_process, leave=False, desc="Timestep", dynamic_ncols=True, ) # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. 
if self.accelerator.mixed_precision == "fp16": inference_dtype = torch.float16 elif self.accelerator.mixed_precision == "bf16": inference_dtype = torch.bfloat16 else: inference_dtype = torch.float32 self.sd_pipeline.vae.to(self.accelerator.device, dtype=inference_dtype) self.sd_pipeline.text_encoder.to(self.accelerator.device, dtype=inference_dtype) self.sd_pipeline.unet.to(self.accelerator.device, dtype=inference_dtype) trainable_layers = self.sd_pipeline.get_trainable_layers() self.accelerator.register_save_state_pre_hook(self._save_model_hook) self.accelerator.register_load_state_pre_hook(self._load_model_hook) # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if self.config.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True self.optimizer = self._setup_optimizer( trainable_layers.parameters() if not isinstance(trainable_layers, list) else trainable_layers ) self.neg_prompt_embed = self.sd_pipeline.text_encoder( self.sd_pipeline.tokenizer( [""] if self.config.negative_prompts is None else self.config.negative_prompts, return_tensors="pt", padding="max_length", truncation=True, max_length=self.sd_pipeline.tokenizer.model_max_length, ).input_ids.to(self.accelerator.device) )[0] if config.per_prompt_stat_tracking: self.stat_tracker = PerPromptStatTracker( config.per_prompt_stat_tracking_buffer_size, config.per_prompt_stat_tracking_min_count, ) # NOTE: for some reason, autocast is necessary for non-lora training but for lora training it isn't necessary and it uses # more memory self.autocast = self.sd_pipeline.autocast or self.accelerator.autocast if hasattr(self.sd_pipeline, "use_lora") and self.sd_pipeline.use_lora: unet, self.optimizer = self.accelerator.prepare(trainable_layers, self.optimizer) self.trainable_layers = list(filter(lambda p: p.requires_grad, unet.parameters())) else: self.trainable_layers, self.optimizer = self.accelerator.prepare(trainable_layers, self.optimizer) if self.config.async_reward_computation: self.executor = futures.ThreadPoolExecutor(max_workers=config.max_workers) if config.resume_from: logger.info(f"Resuming from {config.resume_from}") self.accelerator.load_state(config.resume_from) self.first_epoch = int(config.resume_from.split("_")[-1]) + 1 else: self.first_epoch = 0 def compute_rewards(self, prompt_image_pairs, is_async=False): if not is_async: rewards = [] for images, prompts, prompt_metadata in prompt_image_pairs: reward, reward_metadata = self.reward_fn(images, prompts, prompt_metadata) rewards.append( ( torch.as_tensor(reward, device=self.accelerator.device), reward_metadata, ) ) else: rewards = self.executor.map(lambda x: self.reward_fn(*x), prompt_image_pairs) rewards = [ (torch.as_tensor(reward.result(), device=self.accelerator.device), reward_metadata.result()) for reward, reward_metadata in rewards ] return zip(*rewards) def step(self, epoch: int, global_step: int): """ Perform a single step of training. Args: epoch (int): The current epoch. global_step (int): The current global step. Side Effects: - Model weights are updated - Logs the statistics to the accelerator trackers. - If `self.image_samples_callback` is not None, it will be called with the prompt_image_pairs, global_step, and the accelerator tracker. Returns: global_step (int): The updated global step. 
""" samples, prompt_image_data = self._generate_samples( iterations=self.config.sample_num_batches_per_epoch, batch_size=self.config.sample_batch_size, ) # collate samples into dict where each entry has shape (num_batches_per_epoch * sample.batch_size, ...) samples = {k: torch.cat([s[k] for s in samples]) for k in samples[0].keys()} rewards, rewards_metadata = self.compute_rewards( prompt_image_data, is_async=self.config.async_reward_computation ) for i, image_data in enumerate(prompt_image_data): image_data.extend([rewards[i], rewards_metadata[i]]) if self.image_samples_callback is not None: self.image_samples_callback(prompt_image_data, global_step, self.accelerator.trackers[0]) rewards = torch.cat(rewards) rewards = self.accelerator.gather(rewards).cpu().numpy() self.accelerator.log( { "reward": rewards, "epoch": epoch, "reward_mean": rewards.mean(), "reward_std": rewards.std(), }, step=global_step, ) if self.config.per_prompt_stat_tracking: # gather the prompts across processes prompt_ids = self.accelerator.gather(samples["prompt_ids"]).cpu().numpy() prompts = self.sd_pipeline.tokenizer.batch_decode(prompt_ids, skip_special_tokens=True) advantages = self.stat_tracker.update(prompts, rewards) else: advantages = (rewards - rewards.mean()) / (rewards.std() + 1e-8) # ungather advantages; keep the entries corresponding to the samples on this process samples["advantages"] = ( torch.as_tensor(advantages) .reshape(self.accelerator.num_processes, -1)[self.accelerator.process_index] .to(self.accelerator.device) ) del samples["prompt_ids"] total_batch_size, num_timesteps = samples["timesteps"].shape for inner_epoch in range(self.config.train_num_inner_epochs): # shuffle samples along batch dimension perm = torch.randperm(total_batch_size, device=self.accelerator.device) samples = {k: v[perm] for k, v in samples.items()} # shuffle along time dimension independently for each sample # still trying to understand the code below perms = torch.stack( [torch.randperm(num_timesteps, device=self.accelerator.device) for _ in range(total_batch_size)] ) for key in ["timesteps", "latents", "next_latents", "log_probs"]: samples[key] = samples[key][ torch.arange(total_batch_size, device=self.accelerator.device)[:, None], perms, ] original_keys = samples.keys() original_values = samples.values() # rebatch them as user defined train_batch_size is different from sample_batch_size reshaped_values = [v.reshape(-1, self.config.train_batch_size, *v.shape[1:]) for v in original_values] # Transpose the list of original values transposed_values = zip(*reshaped_values) # Create new dictionaries for each row of transposed values samples_batched = [dict(zip(original_keys, row_values)) for row_values in transposed_values] self.sd_pipeline.unet.train() global_step = self._train_batched_samples(inner_epoch, epoch, global_step, samples_batched) # ensure optimization step at the end of the inner epoch if not self.accelerator.sync_gradients: raise ValueError( "Optimization step should have been performed by this point. Please check calculated gradient accumulation settings." 
) if epoch != 0 and epoch % self.config.save_freq == 0 and self.accelerator.is_main_process: self.accelerator.save_state() return global_step def calculate_loss(self, latents, timesteps, next_latents, log_probs, advantages, embeds): """ Calculate the loss for a batch of an unpacked sample Args: latents (torch.Tensor): The latents sampled from the diffusion model, shape: [batch_size, num_channels_latents, height, width] timesteps (torch.Tensor): The timesteps sampled from the diffusion model, shape: [batch_size] next_latents (torch.Tensor): The next latents sampled from the diffusion model, shape: [batch_size, num_channels_latents, height, width] log_probs (torch.Tensor): The log probabilities of the latents, shape: [batch_size] advantages (torch.Tensor): The advantages of the latents, shape: [batch_size] embeds (torch.Tensor): The embeddings of the prompts, shape: [2*batch_size or batch_size, ...] Note: the "or" is because if train_cfg is True, the expectation is that negative prompts are concatenated to the embeds Returns: loss (torch.Tensor), approx_kl (torch.Tensor), clipfrac (torch.Tensor) (all of these are of shape (1,)) """ with self.autocast(): if self.config.train_cfg: noise_pred = self.sd_pipeline.unet( torch.cat([latents] * 2), torch.cat([timesteps] * 2), embeds, ).sample noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.config.sample_guidance_scale * ( noise_pred_text - noise_pred_uncond ) else: noise_pred = self.sd_pipeline.unet( latents, timesteps, embeds, ).sample # compute the log prob of next_latents given latents under the current model scheduler_step_output = self.sd_pipeline.scheduler_step( noise_pred, timesteps, latents, eta=self.config.sample_eta, prev_sample=next_latents, ) log_prob = scheduler_step_output.log_probs advantages = torch.clamp( advantages, -self.config.train_adv_clip_max, self.config.train_adv_clip_max, ) ratio = torch.exp(log_prob - log_probs) loss = self.loss(advantages, self.config.train_clip_range, ratio) approx_kl = 0.5 * torch.mean((log_prob - log_probs) ** 2) clipfrac = torch.mean((torch.abs(ratio - 1.0) > self.config.train_clip_range).float()) return loss, approx_kl, clipfrac def loss( self, advantages: torch.Tensor, clip_range: float, ratio: torch.Tensor, ): unclipped_loss = -advantages * ratio clipped_loss = -advantages * torch.clamp( ratio, 1.0 - clip_range, 1.0 + clip_range, ) return torch.mean(torch.maximum(unclipped_loss, clipped_loss)) def _setup_optimizer(self, trainable_layers_parameters): if self.config.train_use_8bit_adam: import bitsandbytes optimizer_cls = bitsandbytes.optim.AdamW8bit else: optimizer_cls = torch.optim.AdamW return optimizer_cls( trainable_layers_parameters, lr=self.config.train_learning_rate, betas=(self.config.train_adam_beta1, self.config.train_adam_beta2), weight_decay=self.config.train_adam_weight_decay, eps=self.config.train_adam_epsilon, ) def _save_model_hook(self, models, weights, output_dir): self.sd_pipeline.save_checkpoint(models, weights, output_dir) weights.pop() # ensures that accelerate doesn't try to handle saving of the model def _load_model_hook(self, models, input_dir): self.sd_pipeline.load_checkpoint(models, input_dir) models.pop() # ensures that accelerate doesn't try to handle loading of the model def _generate_samples(self, iterations, batch_size): """ Generate samples from the model Args: iterations (int): Number of iterations to generate samples for batch_size (int): Batch size to use for sampling Returns: samples (List[Dict[str, torch.Tensor]]), 
prompt_image_pairs (List[List[Any]]) """ samples = [] prompt_image_pairs = [] self.sd_pipeline.unet.eval() sample_neg_prompt_embeds = self.neg_prompt_embed.repeat(batch_size, 1, 1) for _ in range(iterations): prompts, prompt_metadata = zip(*[self.prompt_fn() for _ in range(batch_size)]) prompt_ids = self.sd_pipeline.tokenizer( prompts, return_tensors="pt", padding="max_length", truncation=True, max_length=self.sd_pipeline.tokenizer.model_max_length, ).input_ids.to(self.accelerator.device) prompt_embeds = self.sd_pipeline.text_encoder(prompt_ids)[0] with self.autocast(): sd_output = self.sd_pipeline( prompt_embeds=prompt_embeds, negative_prompt_embeds=sample_neg_prompt_embeds, num_inference_steps=self.config.sample_num_steps, guidance_scale=self.config.sample_guidance_scale, eta=self.config.sample_eta, output_type="pt", ) images = sd_output.images latents = sd_output.latents log_probs = sd_output.log_probs latents = torch.stack(latents, dim=1) # (batch_size, num_steps + 1, ...) log_probs = torch.stack(log_probs, dim=1) # (batch_size, num_steps, 1) timesteps = self.sd_pipeline.scheduler.timesteps.repeat(batch_size, 1) # (batch_size, num_steps) samples.append( { "prompt_ids": prompt_ids, "prompt_embeds": prompt_embeds, "timesteps": timesteps, "latents": latents[:, :-1], # each entry is the latent before timestep t "next_latents": latents[:, 1:], # each entry is the latent after timestep t "log_probs": log_probs, "negative_prompt_embeds": sample_neg_prompt_embeds, } ) prompt_image_pairs.append([images, prompts, prompt_metadata]) return samples, prompt_image_pairs def _train_batched_samples(self, inner_epoch, epoch, global_step, batched_samples): """ Train on a batch of samples. Main training segment Args: inner_epoch (int): The current inner epoch epoch (int): The current epoch global_step (int): The current global step batched_samples (List[Dict[str, torch.Tensor]]): The batched samples to train on Side Effects: - Model weights are updated - Logs the statistics to the accelerator trackers. 
Returns: global_step (int): The updated global step """ info = defaultdict(list) for _i, sample in enumerate(batched_samples): if self.config.train_cfg: # concat negative prompts to sample prompts to avoid two forward passes embeds = torch.cat([sample["negative_prompt_embeds"], sample["prompt_embeds"]]) else: embeds = sample["prompt_embeds"] for j in range(self.num_train_timesteps): with self.accelerator.accumulate(self.sd_pipeline.unet): loss, approx_kl, clipfrac = self.calculate_loss( sample["latents"][:, j], sample["timesteps"][:, j], sample["next_latents"][:, j], sample["log_probs"][:, j], sample["advantages"], embeds, ) info["approx_kl"].append(approx_kl) info["clipfrac"].append(clipfrac) info["loss"].append(loss) self.accelerator.backward(loss) if self.accelerator.sync_gradients: self.accelerator.clip_grad_norm_( self.trainable_layers.parameters() if not isinstance(self.trainable_layers, list) else self.trainable_layers, self.config.train_max_grad_norm, ) self.optimizer.step() self.optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if self.accelerator.sync_gradients: # log training-related stuff info = {k: torch.mean(torch.stack(v)) for k, v in info.items()} info = self.accelerator.reduce(info, reduction="mean") info.update({"epoch": epoch, "inner_epoch": inner_epoch}) self.accelerator.log(info, step=global_step) global_step += 1 info = defaultdict(list) return global_step def _config_check(self) -> Tuple[bool, str]: samples_per_epoch = ( self.config.sample_batch_size * self.accelerator.num_processes * self.config.sample_num_batches_per_epoch ) total_train_batch_size = ( self.config.train_batch_size * self.accelerator.num_processes * self.config.train_gradient_accumulation_steps ) if not self.config.sample_batch_size >= self.config.train_batch_size: return ( False, f"Sample batch size ({self.config.sample_batch_size}) must be greater than or equal to the train batch size ({self.config.train_batch_size})", ) if not self.config.sample_batch_size % self.config.train_batch_size == 0: return ( False, f"Sample batch size ({self.config.sample_batch_size}) must be divisible by the train batch size ({self.config.train_batch_size})", ) if not samples_per_epoch % total_train_batch_size == 0: return ( False, f"Number of samples per epoch ({samples_per_epoch}) must be divisible by the total train batch size ({total_train_batch_size})", ) return True, "" def train(self, epochs: Optional[int] = None): """ Train the model for a given number of epochs """ global_step = 0 if epochs is None: epochs = self.config.num_epochs for epoch in range(self.first_epoch, epochs): global_step = self.step(epoch, global_step) def create_model_card(self, path: str, model_name: Optional[str] = "TRL DDPO Model") -> None: """Creates and saves a model card for a TRL model. Args: path (`str`): The path to save the model card to. model_name (`str`, *optional*): The name of the model, defaults to `TRL DDPO Model`. """ try: user = whoami()["name"] # handle the offline case except Exception: warnings.warn("Cannot retrieve user information assuming you are running in offline mode.") return if not os.path.exists(path): os.makedirs(path) model_card_content = MODEL_CARD_TEMPLATE.format(model_name=model_name, model_id=f"{user}/{path}") with open(os.path.join(path, "README.md"), "w", encoding="utf-8") as f: f.write(model_card_content) def _save_pretrained(self, save_directory): self.sd_pipeline.save_pretrained(save_directory) self.create_model_card(save_directory)
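# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the trainer): the `loss` method above is a
# PPO-style clipped surrogate objective. The toy tensors below are made-up
# values, shown only to make the clipping behavior concrete:
#
#     import torch
#
#     advantages = torch.tensor([1.0, -1.0])
#     ratio = torch.tensor([1.5, 0.4])  # both fall outside clip_range=0.2
#     clip_range = 0.2
#
#     unclipped = -advantages * ratio
#     clipped = -advantages * torch.clamp(ratio, 1.0 - clip_range, 1.0 + clip_range)
#     # taking the element-wise maximum keeps the more pessimistic of the two
#     # losses, which caps how far a single update can move the policy
#     loss = torch.mean(torch.maximum(unclipped, clipped))
# ---------------------------------------------------------------------------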
trl/trl/trainer/ddpo_trainer.py/0
{ "file_path": "trl/trl/trainer/ddpo_trainer.py", "repo_id": "trl", "token_count": 12357 }
493
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional

from transformers import TrainingArguments


@dataclass
class RewardConfig(TrainingArguments):
    """
    RewardConfig collects all training arguments related to the [`RewardTrainer`] class.

    Using [`HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.

    Parameters:
        max_length (`int`, *optional*, defaults to `None`):
            The maximum length of the sequences in the batch. This argument is required if you want to use the
            default data collator.
        gradient_checkpointing (`bool`, *optional*, defaults to `True`):
            If True, use gradient checkpointing to save memory at the expense of a slower backward pass.
    """

    max_length: Optional[int] = None
    """The maximum length of the sequences in the batch. This argument is required if you want to use the default data collator."""

    dataset_num_proc: Optional[int] = None
    """Number of processes to use for processing the dataset."""

    center_rewards_coefficient: Optional[float] = None
    """Coefficient to incentivize the reward model to output mean-zero rewards (proposed by https://huggingface.co/papers/2312.09244, Eq. 2). Recommended value: `0.01`."""
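# ---------------------------------------------------------------------------
# Illustrative sketch (assumed usage, not part of this module): since
# RewardConfig subclasses TrainingArguments, it can be exposed as CLI flags
# via HfArgumentParser, as the class docstring notes:
#
#     from transformers import HfArgumentParser
#
#     parser = HfArgumentParser(RewardConfig)
#     # e.g. python train_reward.py --output_dir out --max_length 512
#     (config,) = parser.parse_args_into_dataclasses()
#     print(config.max_length, config.center_rewards_coefficient)
# ---------------------------------------------------------------------------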
trl/trl/trainer/reward_config.py/0
{ "file_path": "trl/trl/trainer/reward_config.py", "repo_id": "trl", "token_count": 551 }
494
repos: - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.2.1 hooks: - id: ruff args: - --fix - id: ruff-format - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.5.0 hooks: - id: check-merge-conflict - id: check-yaml
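# Usage sketch (assumes the `pre-commit` tool is installed, e.g. via
# `pip install pre-commit`):
#   pre-commit install           # register the git hook once per clone
#   pre-commit run --all-files   # run ruff/ruff-format and the checks on demand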
accelerate/.pre-commit-config.yaml/0
{ "file_path": "accelerate/.pre-commit-config.yaml", "repo_id": "accelerate", "token_count": 158 }
0
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Handling big models for inference

When loading a pre-trained model in PyTorch, the usual workflow looks like this:

```py
import torch

my_model = ModelClass(...)
state_dict = torch.load(checkpoint_file)
my_model.load_state_dict(state_dict)
```

In plain English, those steps are:
1. Create the model with randomly initialized weights
2. Load the model weights (in a dictionary usually called a state dict) from the disk
3. Load those weights inside the model

While this works very well for regularly sized models, this workflow has some clear limitations when we deal with a huge model: in step 1, we load a full version of the model in RAM, and spend some time randomly initializing the weights (which will be discarded in step 3). In step 2, we load another full version of the model in RAM, with the pre-trained weights. If you're loading a model with 6 billion parameters, this means you will need 24GB of RAM for each copy of the model, so 48GB in total (half of that if you load the model in FP16).

<Tip warning={true}>

This API is quite new and still in its experimental stage. While we strive to provide a stable API, it's possible some small parts of the public API will change in the future.

</Tip>

## How the Process Works: A Quick Overview

<Youtube id="MWCSGj9jEAo" />

## How the Process Works: Working with Code

### Instantiating an empty model

The first tool 🤗 Accelerate introduces to help with big models is a context manager [`init_empty_weights`] that helps you initialize a model without using any RAM so that step 1 can be done on models of any size. Here is how it works:

```py
from accelerate import init_empty_weights

with init_empty_weights():
    my_model = ModelClass(...)
```

For instance:

```py
with init_empty_weights():
    model = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
```

initializes an empty model with a bit more than 100B parameters. Behind the scenes, this relies on the meta device introduced in PyTorch 1.9. During the initialization under the context manager, each time a parameter is created, it is instantly moved to that device.

<Tip warning={true}>

You can't move a model initialized like this to the CPU or another device directly, since it doesn't have any data. It's also very likely that a forward pass with that empty model will fail, as not all operations are supported on the meta device.

</Tip>

### Sharded checkpoints

It's possible your model is so big that even a single copy won't fit in RAM. That doesn't mean it can't be loaded: if you have one or several GPUs, that's more memory available to store your model. In this case, it's better if your checkpoint is split into several smaller files that we call checkpoint shards.
🤗 Accelerate will handle sharded checkpoints as long as they follow this format: your checkpoint should be in a folder, with several files containing the partial state dicts, and there should be an index in JSON format that contains a dictionary mapping parameter names to the file containing their weights. You can easily shard your model with [`~Accelerator.save_model`]. For instance, we could have a folder containing:

```bash
first_state_dict.bin
index.json
second_state_dict.bin
```

with index.json being the following file:

```
{
  "linear1.weight": "first_state_dict.bin",
  "linear1.bias": "first_state_dict.bin",
  "linear2.weight": "second_state_dict.bin",
  "linear2.bias": "second_state_dict.bin"
}
```

and `first_state_dict.bin` containing the weights for `"linear1.weight"` and `"linear1.bias"`, `second_state_dict.bin` the ones for `"linear2.weight"` and `"linear2.bias"`.

### Loading weights

The second tool 🤗 Accelerate introduces is a function [`load_checkpoint_and_dispatch`], that will allow you to load a checkpoint inside your empty model. This supports full checkpoints (a single file containing the whole state dict) as well as sharded checkpoints. It will also automatically dispatch those weights across the devices you have available (GPUs, CPU RAM), so if you are loading a sharded checkpoint, the maximum RAM usage will be the size of the biggest shard.

If you want to use big model inference with 🤗 Transformers models, check out this [documentation](https://huggingface.co/docs/transformers/main/en/main_classes/model#large-model-loading).

Here is how we can use this to load the [GPT2-1.5B](https://huggingface.co/marcsun13/gpt2-xl-linear-sharded) model. Let's download the sharded version of this model.

```bash
pip install huggingface_hub
```

```py
from huggingface_hub import snapshot_download

checkpoint = "marcsun13/gpt2-xl-linear-sharded"
weights_location = snapshot_download(repo_id=checkpoint)
```

To initialize the model, we will use the minGPT library.

```bash
git clone https://github.com/karpathy/minGPT.git
pip install minGPT/
```

```py
from accelerate import init_empty_weights
from mingpt.model import GPT

model_config = GPT.get_default_config()
model_config.model_type = 'gpt2-xl'
model_config.vocab_size = 50257
model_config.block_size = 1024

with init_empty_weights():
    model = GPT(model_config)
```

Then, load the checkpoint we just downloaded with:

```py
from accelerate import load_checkpoint_and_dispatch

model = load_checkpoint_and_dispatch(
    model, checkpoint=weights_location, device_map="auto", no_split_module_classes=['Block']
)
```

By passing `device_map="auto"`, we tell 🤗 Accelerate to determine automatically where to put each layer of the model depending on the available resources:
- first, we use the maximum space available on the GPU(s)
- if we still need space, we store the remaining weights on the CPU
- if there is not enough RAM, we store the remaining weights on the hard drive as memory-mapped tensors

#### `no_split_module_classes`

This parameter will indicate that some of the modules with the name `"Block"` should not be split across different devices. You should set here all blocks that include a residual connection of some kind.

#### The `device_map`

You can see the `device_map` that 🤗 Accelerate picked by accessing the `hf_device_map` attribute of your model:

```py
model.hf_device_map
```

```python out
{'transformer.wte': 0,
 'transformer.wpe': 0,
 'transformer.drop': 0,
 'transformer.h.0': 0,
 ...
 'transformer.h.21': 0,
 'transformer.h.22': 1,
 'transformer.h.23': 1,
 'transformer.h.24': 1,
 ...
 'transformer.h.47': 1,
 'transformer.ln_f': 1,
 'lm_head': 1}
```

It's fully possible to create your own device map for the layers to use as well, specifying the GPU device to use (a number), `"cpu"`, or `"disk"` and pass this in:

```python
device_map = {
    "transformer.wte": "cpu",
    "transformer.wpe": 0,
    "transformer.drop": "cpu",
    "transformer.h.0": "disk"
}

model = load_checkpoint_and_dispatch(
    model, checkpoint=weights_location, device_map=device_map
)
```

### Run the model

Now that we have done this, our model lies across several devices, and maybe the hard drive. But it can still be used as a regular PyTorch model:

```py
from mingpt.bpe import BPETokenizer

tokenizer = BPETokenizer()
inputs = tokenizer("Hello, my name is").to(0)

outputs = model.generate(inputs, max_new_tokens=10, do_sample=False)[0]
tokenizer.decode(outputs.cpu().squeeze())
```

Behind the scenes, 🤗 Accelerate added hooks to the model, so that:
- at each layer, the inputs are put on the right device (so even if your model is spread across several GPUs, it works)
- for the weights offloaded on the CPU, they are put on a GPU just before the forward pass and cleaned up just after
- for the weights offloaded on the hard drive, they are loaded in RAM then put on a GPU just before the forward pass and cleaned up just after

This way, your model can run for inference even if it doesn't fit on one of the GPUs or the CPU RAM!

<Tip warning={true}>

This only supports the inference of your model, not training. Most of the computation happens behind `torch.no_grad()` context managers to avoid spending some GPU memory with intermediate activations.

</Tip>

### Designing a device map

You can let 🤗 Accelerate handle the device map computation by setting `device_map` to one of the supported options (`"auto"`, `"balanced"`, `"balanced_low_0"`, `"sequential"`) or create one yourself if you want more control over where each layer should go.

<Tip>

You can derive all sizes of the model (and thus compute a `device_map`) on a model that is on the meta device.

</Tip>

All the options will produce the same result when you don't have enough GPU memory to accommodate the whole model (which is to fit everything that can go on the GPU, then offload weights to the CPU or even to the disk if there is not enough RAM).

When you have more GPU memory available than the model size, here is the difference between each option:
- `"auto"` and `"balanced"` evenly split the model on all available GPUs, making it possible for you to use a batch size greater than 1.
- `"balanced_low_0"` evenly splits the model on all GPUs except the first one, and only puts on GPU 0 what does not fit on the others. This option is great when you need to use GPU 0 for some processing of the outputs, like when using the `generate` function for Transformers models
- `"sequential"` will fit what it can on GPU 0, then move on to GPU 1 and so forth (so it won't use the last GPUs if it doesn't need to).

<Tip>

The options `"auto"` and `"balanced"` produce the same results for now, but the behavior of `"auto"` might change in the future if we find a strategy that makes more sense, while `"balanced"` will stay stable.

</Tip>

First note that you can limit the memory used on each GPU by using the `max_memory` argument (available in [`infer_auto_device_map`] and in all functions using it). When setting `max_memory`, you should pass along a dictionary containing the GPU identifiers (for instance `0`, `1` etc.)
and the `"cpu"` key for the maximum RAM you want to use for CPU offload. The values can either be an integer (in bytes) or a string representing a number with its unit, such as `"10GiB"` or `"10GB"`.

Here is an example where we don't want to use more than 10GiB on each of the two GPUs and no more than 30GiB of CPU RAM for the model weights:

```python
from accelerate import infer_auto_device_map

device_map = infer_auto_device_map(my_model, max_memory={0: "10GiB", 1: "10GiB", "cpu": "30GiB"})
```

<Tip warning={true}>

When a first allocation happens in PyTorch, it loads CUDA kernels which take about 1-2GB of memory depending on the GPU. Therefore you always have less usable memory than the actual size of the GPU. To see how much memory is actually used, do `torch.ones(1).cuda()` and look at the memory usage.

Therefore when you create memory maps with `max_memory` make sure to adjust the available memory accordingly to avoid out-of-memory errors.

</Tip>

Additionally, if you do some additional operations with your outputs without placing them back on the CPU (for instance inside the `generate` method of Transformers) and if you placed your inputs on a GPU, that GPU will consume more memory than the others (Accelerate always places the output back on the device of the input). Therefore if you would like to optimize the maximum batch size and you have many GPUs, give the first GPU less memory. For example, with BLOOM-176B on an 8x80 A100 setup, the close-to-ideal map is:

```python
max_memory = {0: "30GIB", 1: "46GIB", 2: "46GIB", 3: "46GIB", 4: "46GIB", 5: "46GIB", 6: "46GIB", 7: "46GIB"}
```

As you can see, we gave the remaining 7 GPUs ~50% more memory than GPU 0.

If you opt to fully design the `device_map` yourself, it should be a dictionary with keys being module names of your model and values being a valid device identifier (for instance an integer for the GPUs) or `"cpu"` for CPU offload, `"disk"` for disk offload. The keys need to cover the whole model; you can then define your device map as you wish: for instance, if your model has two blocks (let's say `block1` and `block2`) which each contain three linear layers (let's say `linear1`, `linear2` and `linear3`), a valid device map can be:

```python
device_map = {"block1": 0, "block2": 1}
```

another one that is valid could be:

```python
device_map = {"block1": 0, "block2.linear1": 0, "block2.linear2": 1, "block2.linear3": 1}
```

On the other hand, this one is not valid as it does not cover every parameter of the model:

```python
device_map = {"block1": 0, "block2.linear1": 1, "block2.linear2": 1}
```

<Tip>

To be the most efficient, make sure your device map puts the parameters on the GPUs in a sequential manner (e.g. don't put one of the first weights on GPU 0, then weights on GPU 1 and the last weight back to GPU 0) to avoid making many transfers of data between the GPUs.

</Tip>

## CPU offload only

If you want to offload your model to the CPU, you can use [`cpu_offload`]. As a result, all parameters of the model will be offloaded and only one copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that state dict, put on the execution device and passed as they are needed, then offloaded again.

```python
cpu_offload(model, execution_device)
```

You can also use [`cpu_offload_with_hook`]. This function offloads a model to the CPU and puts it back on the execution device when the model is executed.
The difference with [`cpu_offload`] is that the model stays on the execution device after the forward pass and is only offloaded again when the `offload` method of the returned `hook` is called. Furthermore, [`cpu_offload_with_hook`] is more performant but saves less memory. It is useful for pipelines running a model in a loop:

```python
model_1, hook_1 = cpu_offload_with_hook(model_1, execution_device)
model_2, hook_2 = cpu_offload_with_hook(model_2, execution_device, prev_module_hook=hook_1)
model_3, hook_3 = cpu_offload_with_hook(model_3, execution_device, prev_module_hook=hook_2)

hid_1 = model_1(input)
for i in range(50):
    # model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop.
    hid_2 = model_2(hid_1)
# model2 is offloaded to the CPU just before this forward.
hid_3 = model_3(hid_2)

# For model3, you need to manually call the hook offload method.
hook_3.offload()
```

## Disk offload only

To perform disk offload, you can use [`disk_offload`]. As a result, all parameters of the model will be offloaded as memory-mapped arrays in a given folder. During the forward pass, parameters will be accessed from that folder and put on the execution device as they are needed, then offloaded again.

```python
disk_offload(model, offload_dir, execution_device)
```

## Limits and further development

We are aware of the current limitations in the API:

- [`infer_auto_device_map`] (or `device_map="auto"` in [`load_checkpoint_and_dispatch`]) tries to maximize the GPU and CPU RAM it sees available when you execute it. While PyTorch is very good at managing GPU RAM efficiently (and giving it back when not needed), it's not entirely true with Python and CPU RAM. Therefore, an automatically computed device map might be too intense on the CPU. Move a few modules to the disk device if you get crashes due to a lack of RAM.
- [`infer_auto_device_map`] (or `device_map="auto"` in [`load_checkpoint_and_dispatch`]) assigns devices sequentially (to avoid moving things back and forth), so if your first layer is bigger than the size of the GPU you have, it will end up with everything on the CPU/Disk.
- [`load_checkpoint_and_dispatch`] and [`load_checkpoint_in_model`] do not perform any check on the correctness of your state dict compared to your model at the moment (this will be fixed in a future version), so you may get some weird errors if trying to load a checkpoint with mismatched or missing keys.
- The model parallelism used when your model is split on several GPUs is naive and not optimized, meaning that only one GPU works at a given time and the others sit idle.
- When weights are offloaded to the CPU/hard drive, there is no pre-fetching (yet, we will work on this for future versions), which means the weights are put on the GPU when they are needed and not before.
- Hard-drive offloading might be very slow if the hardware you run on does not have fast communication between disk and CPU (like NVMes).
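To tie these pieces together, here is a minimal end-to-end sketch. The model is a toy stand-in and `"my_checkpoint_folder"` is a hypothetical directory holding a full or sharded state dict saved beforehand:

```python
import torch.nn as nn

from accelerate import init_empty_weights, load_checkpoint_and_dispatch

# Toy model standing in for a real architecture
with init_empty_weights():
    model = nn.Sequential(nn.Linear(1024, 1024), nn.ReLU(), nn.Linear(1024, 1024))

# "my_checkpoint_folder" is a placeholder path to previously saved weights
model = load_checkpoint_and_dispatch(
    model,
    checkpoint="my_checkpoint_folder",
    device_map="auto",
)
print(model.hf_device_map)  # inspect where each module was placed
```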
accelerate/docs/source/concept_guides/big_model_inference.md/0
{ "file_path": "accelerate/docs/source/concept_guides/big_model_inference.md", "repo_id": "accelerate", "token_count": 4832 }
1
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Distributed Inference with 🤗 Accelerate

Distributed inference can fall into three brackets:

1. Loading an entire model onto each GPU and sending chunks of a batch through each GPU's model copy at a time
2. Loading parts of a model onto each GPU and processing a single input at one time
3. Loading parts of a model onto each GPU and using what is called scheduled Pipeline Parallelism to combine the two prior techniques.

We're going to go through the first and the last bracket, showcasing how to do each as they are more realistic scenarios.

## Sending chunks of a batch automatically to each loaded model

This is the most memory-intensive solution, as it requires each GPU to keep a full copy of the model in memory at a given time.

Normally when doing this, users send the model to a specific device to load it from the CPU, and then move each prompt to a different device.

A basic pipeline using the `diffusers` library might look something like so:

```python
import torch
import torch.distributed as dist
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
```

Followed then by performing inference based on the specific prompt:

```python
def run_inference(rank, world_size):
    dist.init_process_group("nccl", rank=rank, world_size=world_size)
    pipe.to(rank)

    if torch.distributed.get_rank() == 0:
        prompt = "a dog"
    elif torch.distributed.get_rank() == 1:
        prompt = "a cat"

    result = pipe(prompt).images[0]
    result.save(f"result_{rank}.png")
```

One will notice how we have to check the rank to know what prompt to send, which can be a bit tedious.

A user might then also think that with 🤗 Accelerate, using the `Accelerator` to prepare a dataloader for such a task might also be a simple way to manage this. (To learn more, check out the relevant section in the [Quick Tour](../quicktour#distributed-evaluation))

Can it manage it? Yes. Does it add unneeded extra code, however? Also yes.

With 🤗 Accelerate, we can simplify this process by using the [`Accelerator.split_between_processes`] context manager (which also exists in `PartialState` and `AcceleratorState`). This function will automatically split whatever data you pass to it (be it a prompt, a set of tensors, a dictionary of the prior data, etc.) across all the processes (with optional padding) for you to use right away.
Let's rewrite the above example using this context manager:

```python
import torch
from accelerate import PartialState  # Can also be Accelerator or AcceleratorState
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
distributed_state = PartialState()
pipe.to(distributed_state.device)

# Assume two processes
with distributed_state.split_between_processes(["a dog", "a cat"]) as prompt:
    result = pipe(prompt).images[0]
    result.save(f"result_{distributed_state.process_index}.png")
```

And then to launch the code, we can use 🤗 Accelerate:

If you have generated a config file with `accelerate config`:

```bash
accelerate launch distributed_inference.py
```

If you have a specific config file you want to use:

```bash
accelerate launch --config_file my_config.json distributed_inference.py
```

Or if you don't want to make any config files and launch on two GPUs:

> Note: You will get some warnings about values being guessed based on your system. To remove these you can do `accelerate config default` or go through `accelerate config` to create a config file.

```bash
accelerate launch --num_processes 2 distributed_inference.py
```

We've now reduced the boilerplate code needed to split this data to a few lines of code quite easily.

But what if we have an odd distribution of prompts to GPUs? For example, what if we have 3 prompts, but only 2 GPUs?

Under the context manager, the first GPU would receive the first two prompts and the second GPU the third, ensuring that all prompts are split and no overhead is needed.

*However*, what if we then wanted to do something with the results of *all the GPUs*? (Say gather them all and perform some kind of post processing) You can pass in `apply_padding=True` to ensure that the lists of prompts are padded to the same length, with extra data being taken from the last sample. This way all GPUs will have the same number of prompts, and you can then gather the results.

<Tip>

This is only needed when trying to perform an action such as gathering the results, where the data on each device needs to be the same length. Basic inference does not require this.

</Tip>

For instance:

```python
import torch
from accelerate import PartialState  # Can also be Accelerator or AcceleratorState
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
distributed_state = PartialState()
pipe.to(distributed_state.device)

# Assume two processes
with distributed_state.split_between_processes(["a dog", "a cat", "a chicken"], apply_padding=True) as prompt:
    result = pipe(prompt).images
```

On the first GPU, the prompts will be `["a dog", "a cat"]`, and on the second GPU it will be `["a chicken", "a chicken"]`. Make sure to drop the final sample, as it will be a duplicate of the previous one.

You can find more complex examples [here](https://github.com/huggingface/accelerate/tree/main/examples/inference/distributed) such as how to use it with LLMs.

## Memory-efficient pipeline parallelism (experimental)

This next part will discuss using *pipeline parallelism*. This is an **experimental** API utilizing the [PiPPy library by PyTorch](https://github.com/pytorch/PiPPy/) as a native solution.

The general idea with pipeline parallelism is: say you have 4 GPUs and a model big enough it can be *split* on four GPUs using `device_map="auto"`.
With this method you can send in 4 inputs at a time (for example here, any amount works) and each model chunk will work on an input, then receive the next input once the prior chunk has finished, making it *much* more efficient **and faster** than the method described earlier. Here's a visual taken from the PyTorch repository:

![PiPPy example](https://camo.githubusercontent.com/681d7f415d6142face9dd1b837bdb2e340e5e01a58c3a4b119dea6c0d99e2ce0/68747470733a2f2f692e696d6775722e636f6d2f657955633934372e706e67)

To illustrate how you can use this with Accelerate, we have created an [example zoo](https://github.com/huggingface/accelerate/tree/main/examples/inference) showcasing a number of different models and situations. In this tutorial, we'll show this method for GPT2 across two GPUs.

Before you proceed, please make sure you have the latest pippy installed by running the following:

```bash
pip install torchpippy
```

We require at least version 0.2.0. To confirm that you have the correct version, run `pip show torchpippy`.

Start by creating the model on the CPU:

```{python}
from transformers import GPT2ForSequenceClassification, GPT2Config

config = GPT2Config()
model = GPT2ForSequenceClassification(config)
model.eval()
```

Next you'll need to create some example inputs to use. These help PiPPy trace the model.

<Tip warning={true}>

The way you make this example will determine the relative batch size that will be used/passed through the model at a given time, so make sure to remember how many items there are!

</Tip>

```{python}
input = torch.randint(
    low=0,
    high=config.vocab_size,
    size=(2, 1024),  # bs x seq_len
    device="cpu",
    dtype=torch.int64,
    requires_grad=False,
)
```

Next we need to actually perform the tracing and get the model ready. To do so, use the [`inference.prepare_pippy`] function and it will fully wrap the model for pipeline parallelism automatically:

```{python}
from accelerate.inference import prepare_pippy

example_inputs = {"input_ids": input}
model = prepare_pippy(model, example_args=(input,))
```

<Tip>

There are a variety of parameters you can pass through to `prepare_pippy`:

* `split_points` lets you determine what layers to split the model at. By default we use wherever `device_map="auto"` declares, such as `fc` or `conv1`.

* `num_chunks` determines how the batch will be split and sent to the model itself (so `num_chunks=1` with four split points/four GPUs will have a naive MP where a single input gets passed between the four layer split points)

</Tip>

From here, all that's left is to actually perform the distributed inference!

<Tip warning={true}>

When passing inputs, we highly recommend passing them in as a tuple of arguments. Using `kwargs` is supported, however, this approach is experimental.

</Tip>

```{python}
args = some_more_arguments
with torch.no_grad():
    output = model(*args)
```

When finished, all the data will be on the last process only:

```{python}
from accelerate import PartialState

if PartialState().is_last_process:
    print(output)
```

<Tip>

If you pass in `gather_output=True` to [`inference.prepare_pippy`], the output will be sent across to all the GPUs afterwards without needing the `is_last_process` check. This is `False` by default as it incurs a communication call.

</Tip>

And that's it! To explore more, please check out the inference examples in the [Accelerate repo](https://github.com/huggingface/accelerate/tree/main/examples/inference/pippy) and our [documentation](../package_reference/inference) as we work on improving this integration.
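As a final illustration of the first strategy, here is a minimal sketch of gathering padded results across processes (it assumes the same two-process setup as above; `gather_object` is the Accelerate utility for collecting arbitrary Python objects):

```python
import torch

from accelerate import PartialState
from accelerate.utils import gather_object
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
distributed_state = PartialState()
pipe.to(distributed_state.device)

prompts = ["a dog", "a cat", "a chicken"]
with distributed_state.split_between_processes(prompts, apply_padding=True) as subset:
    images = pipe(subset).images

# Collect every process's images; with apply_padding=True the padded
# duplicate sits at the end, so trim back to the original prompt count
all_images = gather_object(images)[: len(prompts)]
```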
accelerate/docs/source/usage_guides/distributed_inference.md/0
{ "file_path": "accelerate/docs/source/usage_guides/distributed_inference.md", "repo_id": "accelerate", "token_count": 2940 }
2
# What are these scripts?

All scripts in this folder originate from the `nlp_example.py` file, as it is a very simplistic NLP training example using Accelerate with zero extra features.

From there, each further script adds in just **one** feature of Accelerate, showing how you can quickly modify your own scripts to implement these capabilities.

A full example with all of these parts integrated together can be found in the `complete_nlp_example.py` script and `complete_cv_example.py` script.

Adjustments to each script from the base `nlp_example.py` file can be found quickly by searching for "# New Code #"

## Example Scripts by Feature and their Arguments

### Base Example (`../nlp_example.py`)

- Shows how to use `Accelerator` in an extremely simplistic PyTorch training loop
- Arguments available:
  - `mixed_precision`, whether to use mixed precision. ("no", "fp16", or "bf16")
  - `cpu`, whether to train using only the CPU. (yes/no/1/0)

All following scripts also accept these arguments in addition to their added ones.

These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.run`), such as:

```bash
accelerate launch ../nlp_example.py --mixed_precision fp16 --cpu 0
```

### Checkpointing and Resuming Training (`checkpointing.py`)

- Shows how to use `Accelerator.save_state` and `Accelerator.load_state` to save or continue training
- **It is assumed you are continuing off the same training script**
- Arguments available:
  - `checkpointing_steps`, after how many steps the various states should be saved. ("epoch", 1, 2, ...)
  - `output_dir`, where saved state folders should be saved to, default is current working directory
  - `resume_from_checkpoint`, what checkpoint folder to resume from. ("epoch_0", "step_22", ...)

These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `torchrun`), such as:

(Note, `resume_from_checkpoint` assumes that we've run the script for one epoch with the `--checkpointing_steps epoch` flag)

```bash
accelerate launch ./checkpointing.py --checkpointing_steps epoch --output_dir "checkpointing_tutorial" --resume_from_checkpoint "checkpointing_tutorial/epoch_0"
```

### Cross Validation (`cross_validation.py`)

- Shows how to use `Accelerator.free_memory` and run cross validation efficiently with `datasets`.
- Arguments available:
  - `num_folds`, the number of folds the training dataset should be split into.

These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `torchrun`), such as:

```bash
accelerate launch ./cross_validation.py --num_folds 2
```

### Experiment Tracking (`tracking.py`)

- Shows how to use `Accelerate.init_trackers` and `Accelerator.log`
- Can be used with Weights and Biases, TensorBoard, or CometML.
- Arguments available:
  - `with_tracking`, whether to load in all available experiment trackers from the environment.

These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `torchrun`), such as:

```bash
accelerate launch ./tracking.py --with_tracking
```

### Gradient Accumulation (`gradient_accumulation.py`)

- Shows how to use `Accelerator.no_sync` to prevent gradient averaging in a distributed setup.
- Arguments available:
  - `gradient_accumulation_steps`, the number of steps to perform before the gradients are accumulated and the optimizer and scheduler are stepped + zero_grad

These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `torchrun`), such as:

```bash
accelerate launch ./gradient_accumulation.py --gradient_accumulation_steps 5
```

### LocalSGD (`local_sgd.py`)

- Shows how to use `Accelerator.no_sync` to prevent gradient averaging in a distributed setup. However, unlike gradient accumulation, this method does not change the effective batch size. Local SGD can be combined with gradient accumulation.

These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `torchrun`), such as:

```bash
accelerate launch ./local_sgd.py --local_sgd_steps 4
```

### DDP Communication Hook (`ddp_comm_hook.py`)

- Shows how to use DDP Communication Hooks to control and optimize gradient communication across workers in a DistributedDataParallel setup.
- Arguments available:
  - `ddp_comm_hook`, the type of DDP communication hook to use. Choose between `no`, `fp16`, `bf16`, `power_sgd`, and `batched_power_sgd`.

These arguments should be added at the end of any method for starting the python script (such as `accelerate launch`, `python -m torch.distributed.run`), such as:

```bash
accelerate launch ./ddp_comm_hook.py --mixed_precision fp16 --ddp_comm_hook power_sgd
```

### Profiler (`profiler.py`)

- Shows how to use the profiling capabilities of `Accelerate` to profile PyTorch models during training.
- Uses the `ProfileKwargs` handler to customize profiling options, including activities, scheduling, and additional profiling options.
- Can generate and save profiling traces in JSON format for visualization in Chrome's tracing tool.

Arguments available:
- `--record_shapes`: If passed, records shapes for profiling.
- `--profile_memory`: If passed, profiles memory usage.
- `--with_stack`: If passed, profiles stack traces.
- `--with_flops`: If passed, profiles floating point operations (FLOPS).
- `--output_trace_dir`: If specified, saves the profiling trace to the given dir in JSON format.
- `--cpu`: If passed, trains on the CPU instead of GPU.

These arguments should be added at the end of any method for starting the Python script (such as `python`, `accelerate launch`, `torchrun`), such as:

```bash
accelerate launch ./profiler.py --record_shapes --profile_memory --with_flops --output_trace_dir "profiler"
```
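As a hedged sketch of what the "# New Code #" additions look like in practice, here is a minimal loop using the checkpointing feature (toy data so it is self-contained; the real `checkpointing.py` also wires in schedulers and `Accelerator.load_state` for resuming):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

# Toy setup so the sketch is self-contained
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters())
dataloader = DataLoader(TensorDataset(torch.randn(8, 4), torch.randn(8, 2)), batch_size=4)

accelerator = Accelerator()
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for epoch in range(2):
    for inputs, targets in dataloader:
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
    # New Code #
    # Save model/optimizer/RNG state so --resume_from_checkpoint can pick it up
    accelerator.save_state(f"checkpointing_tutorial/epoch_{epoch}")
```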
accelerate/examples/by_feature/README.md/0
{ "file_path": "accelerate/examples/by_feature/README.md", "repo_id": "accelerate", "token_count": 1692 }
3
# Distributed inference examples

This folder contains a variety of tutorials for running distributed inference with the following strategy:

Loading an entire model onto each GPU and sending chunks of a batch through each GPU's model copy at a time

## Installation

```bash
pip install accelerate torch
```

## Running code

You can either use `torchrun` or the recommended way of `accelerate launch` (without needing to run `accelerate config`) on each script:

```bash
accelerate launch --num_processes {NUM_GPUS} phi2.py
```

Or:

```bash
torchrun --nproc-per-node {NUM_GPUS} phi2.py
```
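For orientation, here is a minimal hedged sketch of the strategy these scripts implement (the checkpoint and prompts are placeholders; see `phi2.py` for the full script):

```python
import torch

from accelerate import PartialState
from transformers import AutoModelForCausalLM, AutoTokenizer

# Each process loads its own full copy of the model (placeholder checkpoint)
model_name = "microsoft/phi-2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16)

state = PartialState()
model.to(state.device)

# Each process receives its own slice of the prompt list
prompts = ["Hello, my name is", "The capital of France is", "Once upon a time"]
with state.split_between_processes(prompts) as subset:
    for prompt in subset:
        inputs = tokenizer(prompt, return_tensors="pt").to(state.device)
        outputs = model.generate(**inputs, max_new_tokens=20)
        print(f"rank {state.process_index}: {tokenizer.decode(outputs[0], skip_special_tokens=True)}")
```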
accelerate/examples/inference/distributed/README.md/0
{ "file_path": "accelerate/examples/inference/distributed/README.md", "repo_id": "accelerate", "token_count": 174 }
4
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "0.34.0.dev0" from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .inference import prepare_pippy from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( AutocastKwargs, DataLoaderConfiguration, DDPCommunicationHookType, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, ProfileKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
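# ---------------------------------------------------------------------------
# Illustrative sketch (not executed here): `find_executable_batch_size`,
# re-exported above, retries a training function with a halved batch size
# whenever it hits an out-of-memory error. A hypothetical usage:
#
#     from accelerate import find_executable_batch_size
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # build dataloaders with `batch_size` and run the loop
#
#     train()  # runs at 128, or 64, 32, ... until it fits in memory
# ---------------------------------------------------------------------------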
accelerate/src/accelerate/__init__.py/0
{ "file_path": "accelerate/src/accelerate/__init__.py", "repo_id": "accelerate", "token_count": 500 }
5
#!/usr/bin/env python # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import importlib import logging import os import subprocess import sys from pathlib import Path import psutil import torch from accelerate.commands.config import default_config_file, load_config_from_file from accelerate.commands.config.config_args import SageMakerConfig from accelerate.commands.config.config_utils import DYNAMO_BACKENDS from accelerate.commands.utils import CustomArgumentParser from accelerate.state import get_int_from_env from accelerate.utils import ( ComputeEnvironment, DistributedType, PrepareForLaunch, _filter_args, check_cuda_p2p_ib_support, convert_dict_to_env_variables, is_bf16_available, is_deepspeed_available, is_mlu_available, is_musa_available, is_npu_available, is_rich_available, is_sagemaker_available, is_torch_version, is_torch_xla_available, is_xpu_available, patch_environment, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, str_to_bool, ) from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS, TORCH_DYNAMO_MODES if is_rich_available(): from rich import get_console from rich.logging import RichHandler FORMAT = "%(message)s" logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()]) logger = logging.getLogger(__name__) options_to_group = { "multi_gpu": "Distributed GPUs", "tpu": "TPU", "use_deepspeed": "DeepSpeed Arguments", "use_fsdp": "FSDP Arguments", "use_megatron_lm": "Megatron-LM Arguments", "fp8_backend": "FP8 Arguments", } def clean_option(option): "Finds all cases of - after the first two characters and changes them to _" if "fp8_backend" in option: option = "--fp8_backend" if option.startswith("--"): return option[2:].replace("-", "_") class CustomHelpFormatter(argparse.HelpFormatter): """ This is a custom help formatter that will hide all arguments that are not used in the command line when the help is called. This is useful for the case where the user is using a specific platform and only wants to see the arguments for that platform. 
""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.titles = [ "Hardware Selection Arguments", "Resource Selection Arguments", "Training Paradigm Arguments", "positional arguments", "optional arguments", ] def add_argument(self, action: argparse.Action): if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]: args = sys.argv[2:] else: args = sys.argv[1:] if len(args) > 1: args = list(map(clean_option, args)) used_platforms = [arg for arg in args if arg in options_to_group.keys()] used_titles = [options_to_group[o] for o in used_platforms] if action.container.title not in self.titles + used_titles: action.help = argparse.SUPPRESS elif action.container.title == "Hardware Selection Arguments": if set(action.option_strings).isdisjoint(set(args)): action.help = argparse.SUPPRESS else: action.help = action.help + " (currently selected)" elif action.container.title == "Training Paradigm Arguments": if set(action.option_strings).isdisjoint(set(args)): action.help = argparse.SUPPRESS else: action.help = action.help + " (currently selected)" action.option_strings = [s for s in action.option_strings if "-" not in s[2:]] super().add_argument(action) def end_section(self): if len(self._current_section.items) < 2: self._current_section.items = [] self._current_section.heading = "" super().end_section() def launch_command_parser(subparsers=None): description = "Launch a python script in a distributed scenario. Arguments can be passed in with either hyphens (`--num-processes=2`) or underscores (`--num_processes=2`)" if subparsers is not None: parser = subparsers.add_parser( "launch", description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter ) else: parser = CustomArgumentParser( "Accelerate launch command", description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter, ) parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.") parser.add_argument( "--config_file", default=None, help="The config file to use for the default values in the launching script.", ) parser.add_argument( "--quiet", "-q", action="store_true", help="Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations)", ) # Hardware selection arguments hardware_args = parser.add_argument_group( "Hardware Selection Arguments", "Arguments for selecting the hardware to be used." ) hardware_args.add_argument( "--cpu", default=False, action="store_true", help="Whether or not to force the training on the CPU." ) hardware_args.add_argument( "--multi_gpu", default=False, action="store_true", help="Whether or not this should launch a distributed GPU training.", ) hardware_args.add_argument( "--tpu", default=False, action="store_true", help="Whether or not this should launch a TPU training." ) hardware_args.add_argument( "--ipex", default=False, action="store_true", help="Whether or not this should launch a Intel PyTorch Extension (IPEX) training.", ) # Resource selection arguments resource_args = parser.add_argument_group( "Resource Selection Arguments", "Arguments for fine-tuning how available hardware should be used." ) resource_args.add_argument( "--mixed_precision", type=str, choices=["no", "fp16", "bf16", "fp8"], help="Whether or not to use mixed precision training. " "Choose between FP16 and BF16 (bfloat16) training. 
" "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.", ) resource_args.add_argument( "--num_processes", type=int, default=None, help="The total number of processes to be launched in parallel." ) resource_args.add_argument( "--num_machines", type=int, default=None, help="The total number of machines used in this training." ) resource_args.add_argument( "--num_cpu_threads_per_process", type=int, default=None, help="The number of CPU threads per process. Can be tuned for optimal performance.", ) resource_args.add_argument( "--enable_cpu_affinity", default=False, action="store_true", help="Whether or not CPU affinity and balancing should be enabled. Currently only supported on NVIDIA hardware.", ) # Dynamo arguments resource_args.add_argument( "--dynamo_backend", type=str, choices=["no"] + [b.lower() for b in DYNAMO_BACKENDS], help="Choose a backend to optimize your training with dynamo, see more at " "https://github.com/pytorch/torchdynamo.", ) resource_args.add_argument( "--dynamo_mode", type=str, default="default", choices=TORCH_DYNAMO_MODES, help="Choose a mode to optimize your training with dynamo.", ) resource_args.add_argument( "--dynamo_use_fullgraph", default=False, action="store_true", help="Whether to use full graph mode for dynamo or it is ok to break model into several subgraphs", ) resource_args.add_argument( "--dynamo_use_dynamic", default=False, action="store_true", help="Whether to enable dynamic shape tracing.", ) # Training Paradigm arguments paradigm_args = parser.add_argument_group( "Training Paradigm Arguments", "Arguments for selecting which training paradigm to be used." ) paradigm_args.add_argument( "--use_deepspeed", default=False, action="store_true", help="Whether to use deepspeed.", ) paradigm_args.add_argument( "--use_fsdp", default=False, action="store_true", help="Whether to use fsdp.", ) paradigm_args.add_argument( "--use_megatron_lm", default=False, action="store_true", help="Whether to use Megatron-LM.", ) paradigm_args.add_argument( "--use_xpu", default=False, action="store_true", help="Whether to use IPEX plugin to speed up training on XPU specifically.", ) # distributed GPU training arguments distributed_args = parser.add_argument_group("Distributed GPUs", "Arguments related to distributed GPU training.") distributed_args.add_argument( "--gpu_ids", default=None, help="What GPUs (by id) should be used for training on this machine as a comma-seperated list", ) distributed_args.add_argument( "--same_network", default=False, action="store_true", help="Whether all machines used for multinode training exist on the same local network.", ) distributed_args.add_argument( "--machine_rank", type=int, default=None, help="The rank of the machine on which this script is launched." ) distributed_args.add_argument( "--main_process_ip", type=str, default=None, help="The IP address of the machine of rank 0." ) distributed_args.add_argument( "--main_process_port", type=int, default=None, help="The port to use to communicate with the machine of rank 0.", ) distributed_args.add_argument( "-t", "--tee", default="0", type=str, help="Tee std streams into a log file and also to console.", ) distributed_args.add_argument( "--log_dir", type=str, default=None, help=( "Base directory to use for log files when using torchrun/torch.distributed.run as launcher. " "Use with --tee to redirect std streams info log files." 
), ) distributed_args.add_argument( "--role", type=str, default="default", help="User-defined role for the workers.", ) # Rendezvous related arguments distributed_args.add_argument( "--rdzv_backend", type=str, default="static", help="The rendezvous method to use, such as 'static' (the default) or 'c10d'", ) distributed_args.add_argument( "--rdzv_conf", type=str, default="", help="Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).", ) distributed_args.add_argument( "--max_restarts", type=int, default=0, help="Maximum number of worker group restarts before failing.", ) distributed_args.add_argument( "--monitor_interval", type=float, default=0.1, help="Interval, in seconds, to monitor the state of workers.", ) parser.add_argument( "-m", "--module", action="store_true", help="Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.", ) parser.add_argument( "--no_python", action="store_true", help="Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.", ) # TPU arguments tpu_args = parser.add_argument_group("TPU", "Arguments related to TPU.") tpu_args.add_argument( "--tpu_cluster", action="store_true", dest="tpu_use_cluster", help="Whether to use a GCP TPU pod for training.", ) tpu_args.add_argument( "--no_tpu_cluster", action="store_false", dest="tpu_use_cluster", help="Should not be passed explicitly, this is for internal use only.", ) tpu_args.add_argument( "--tpu_use_sudo", action="store_true", help="Whether to use `sudo` when running the TPU training script in each pod.", ) tpu_args.add_argument( "--vm", type=str, action="append", help=( "List of single Compute VM instance names. " "If not provided we assume usage of instance groups. For TPU pods." ), ) tpu_args.add_argument( "--env", type=str, action="append", help="List of environment variables to set on the Compute VM instances. For TPU pods.", ) tpu_args.add_argument( "--main_training_function", type=str, default=None, help="The name of the main function to be executed in your script (only for TPU training).", ) tpu_args.add_argument( "--downcast_bf16", action="store_true", help="Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.", ) # DeepSpeed arguments deepspeed_args = parser.add_argument_group("DeepSpeed Arguments", "Arguments related to DeepSpeed.") deepspeed_args.add_argument( "--deepspeed_config_file", default=None, type=str, help="DeepSpeed config file.", ) deepspeed_args.add_argument( "--zero_stage", default=None, type=int, help="DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to `2`.", ) deepspeed_args.add_argument( "--offload_optimizer_device", default=None, type=str, help="Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to 'none'.", ) deepspeed_args.add_argument( "--offload_param_device", default=None, type=str, help="Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to 'none'.", ) deepspeed_args.add_argument( "--offload_optimizer_nvme_path", default=None, type=str, help="Decides Nvme Path to offload optimizer states (useful only when `use_deepspeed` flag is passed). 
" "If unspecified, will default to 'none'.", ) deepspeed_args.add_argument( "--offload_param_nvme_path", default=None, type=str, help="Decides Nvme Path to offload parameters (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to 'none'.", ) deepspeed_args.add_argument( "--gradient_accumulation_steps", default=None, type=int, help="No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to `1`.", ) deepspeed_args.add_argument( "--gradient_clipping", default=None, type=float, help="gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to `1.0`.", ) deepspeed_args.add_argument( "--zero3_init_flag", default=None, type=str, help="Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. " "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `true`.", ) deepspeed_args.add_argument( "--zero3_save_16bit_model", default=None, type=str, help="Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. " "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `false`.", ) deepspeed_args.add_argument( "--deepspeed_hostfile", default=None, type=str, help="DeepSpeed hostfile for configuring multi-node compute resources.", ) deepspeed_args.add_argument( "--deepspeed_exclusion_filter", default=None, type=str, help="DeepSpeed exclusion filter string when using mutli-node setup.", ) deepspeed_args.add_argument( "--deepspeed_inclusion_filter", default=None, type=str, help="DeepSpeed inclusion filter string when using mutli-node setup.", ) deepspeed_args.add_argument( "--deepspeed_multinode_launcher", default=None, type=str, help="DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.", ) deepspeed_args.add_argument( "--deepspeed_moe_layer_cls_names", default=None, type=str, help="comma-separated list of transformer MoE layer class names (case-sensitive) to wrap ,e.g, `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ..." " (useful only when `use_deepspeed` flag is passed).", ) # fsdp arguments fsdp_args = parser.add_argument_group("FSDP Arguments", "Arguments related to Fully Shared Data Parallelism.") fsdp_args.add_argument( "--fsdp_offload_params", default="false", type=str, help="Decides Whether (true|false) to offload parameters and gradients to CPU. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_min_num_params", type=int, default=1e8, help="FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_sharding_strategy", type=str, default="FULL_SHARD", help="FSDP's Sharding Strategy. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_auto_wrap_policy", type=str, default=None, help="FSDP's auto wrap policy. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_transformer_layer_cls_to_wrap", default=None, type=str, help="Transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` .... " "(useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_backward_prefetch_policy", default=None, type=str, help="This argument is deprecated and will be removed in version 0.27.0 of 🤗 Accelerate. 
Use `fsdp_backward_prefetch` instead.", ) fsdp_args.add_argument( "--fsdp_backward_prefetch", default=None, type=str, help="FSDP's backward prefetch policy. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_state_dict_type", default=None, type=str, help="FSDP's state dict type. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_forward_prefetch", default="false", type=str, help="If True, then FSDP explicitly prefetches the next upcoming " "all-gather while executing in the forward pass (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_use_orig_params", default="true", type=str, help="If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters." " (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_cpu_ram_efficient_loading", default="true", type=str, help="If True, only the first process loads the pretrained model checkpoint while all other processes have empty weights. " "Only applicable for 🤗 Transformers. When using this, `--fsdp_sync_module_states` needs to be `True`. " "(useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_sync_module_states", default="true", type=str, help="If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0." " (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_activation_checkpointing", default="false", type=str, help="Decides whether (true|false) intermediate activations are freed during the forward pass, and a checkpoint is left as a placeholder. (useful only when `use_fsdp` flag is passed).", ) # megatron_lm args megatron_lm_args = parser.add_argument_group("Megatron-LM Arguments", "Arguments related to Megatron-LM.") megatron_lm_args.add_argument( "--megatron_lm_tp_degree", type=int, default=1, help="Megatron-LM's Tensor Parallelism (TP) degree. (useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_pp_degree", type=int, default=1, help="Megatron-LM's Pipeline Parallelism (PP) degree. (useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_num_micro_batches", type=int, default=None, help="Megatron-LM's number of micro batches when PP degree > 1. (useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_sequence_parallelism", default=None, type=str, help="Decides whether (true|false) to enable Sequence Parallelism when TP degree > 1. " "(useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_recompute_activations", default=None, type=str, help="Decides whether (true|false) to enable Selective Activation Recomputation. " "(useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_use_distributed_optimizer", default=None, type=str, help="Decides whether (true|false) to use the distributed optimizer, " "which shards optimizer state and gradients across Data Parallel (DP) ranks. " "(useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_gradient_clipping", default=1.0, type=float, help="Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable). 
" "(useful only when `use_megatron_lm` flag is passed).", ) # FP8 arguments fp8_args = parser.add_argument_group( "FP8 Arguments", "Arguments related to FP8 training (requires `--mixed_precision=fp8`)" ) fp8_args.add_argument( "--fp8_backend", type=str, choices=["te", "msamp"], help="Choose a backend to train with FP8 (te: TransformerEngine, msamp: MS-AMP)", ) fp8_args.add_argument( "--fp8_use_autocast_during_eval", default=False, action="store_true", help="Whether to use FP8 autocast during eval mode (useful only when `--fp8_backend=te` is passed). Generally better metrics are found when this is not passed.", ) fp8_args.add_argument( "--fp8_margin", type=int, default=0, help="The margin to use for the gradient scaling (useful only when `--fp8_backend=te` is passed).", ) fp8_args.add_argument( "--fp8_interval", type=int, default=1, help="The interval to use for how often the scaling factor is recomputed (useful only when `--fp8_backend=te` is passed).", ) fp8_args.add_argument( "--fp8_format", type=str, default="E4M3", choices=["E4M3", "HYBRID"], help="The format to use for the FP8 recipe (useful only when `--fp8_backend=te` is passed).", ) fp8_args.add_argument( "--fp8_amax_history_len", type=int, default=1024, help="The length of the history to use for the scaling factor computation (useful only when `--fp8_backend=te` is passed).", ) fp8_args.add_argument( "--fp8_amax_compute_algo", type=str, default="most_recent", choices=["max", "most_recent"], help="The algorithm to use for the scaling factor computation. (useful only when `--fp8_backend=te` is passed).", ) fp8_args.add_argument( "--fp8_override_linear_precision", type=lambda x: tuple(map(str_to_bool, x.split(","))), default=(False, False, False), help="Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision. Should be passed in a comma-seperated string of booleans (useful only when `--fp8_backend=te` is passed).", ) fp8_args.add_argument( "--fp8_opt_level", type=str, default="O2", choices=["O1", "O2"], help="What level of 8-bit collective communication should be used with MS-AMP (useful only when `--fp8_backend=msamp` is passed).", ) # AWS arguments aws_args = parser.add_argument_group("AWS Arguments", "Arguments related to AWS.") aws_args.add_argument( "--aws_access_key_id", type=str, default=None, help="The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job", ) aws_args.add_argument( "--aws_secret_access_key", type=str, default=None, help="The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job.", ) parser.add_argument( "--debug", action="store_true", help="Whether to print out the torch.distributed stack trace when something fails.", ) parser.add_argument( "training_script", type=str, help=( "The full path to the script to be launched in parallel, followed by all the arguments for the training " "script." ), ) # MPI arguments mpirun_args = parser.add_argument_group("MPI Arguments", "Arguments related to mpirun for Multi-CPU") mpirun_args.add_argument( "--mpirun_hostfile", type=str, default=None, help="Location for a hostfile for using Accelerate to launch a multi-CPU training job with mpirun. 
This will " "get passed to the MPI --hostfile or -f parameter, depending on which MPI program is installed.", ) mpirun_args.add_argument( "--mpirun_ccl", type=int, default=1, help="The number of oneCCL worker threads when using Accelerate to launch multi-CPU training with mpirun.", ) # Other arguments of the training scripts parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.") if subparsers is not None: parser.set_defaults(func=launch_command) return parser def simple_launcher(args): cmd, current_env = prepare_simple_launcher_cmd_env(args) process = subprocess.Popen(cmd, env=current_env) process.wait() if process.returncode != 0: if not args.quiet: raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd) else: sys.exit(1) def multi_gpu_launcher(args): import torch.distributed.run as distrib_run current_env = prepare_multi_gpu_env(args) if not check_cuda_p2p_ib_support(): message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled." warn = False if "NCCL_P2P_DISABLE" not in current_env: current_env["NCCL_P2P_DISABLE"] = "1" warn = True if "NCCL_IB_DISABLE" not in current_env: current_env["NCCL_IB_DISABLE"] = "1" warn = True if warn: logger.warning(message) debug = getattr(args, "debug", False) args = _filter_args( args, distrib_run.get_args_parser(), ["--training_script", args.training_script, "--training_script_args", args.training_script_args], ) with patch_environment(**current_env): try: distrib_run.run(args) except Exception: if is_rich_available() and debug: console = get_console() console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]") console.print_exception(suppress=[__file__], show_locals=False) else: raise def deepspeed_launcher(args): import torch.distributed.run as distrib_run if not is_deepspeed_available(): raise ImportError("DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.") else: from deepspeed.launcher.runner import DEEPSPEED_ENVIRONMENT_NAME cmd, current_env = prepare_deepspeed_cmd_env(args) if not check_cuda_p2p_ib_support(): message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled." 
warn = False if "NCCL_P2P_DISABLE" not in current_env: current_env["NCCL_P2P_DISABLE"] = "1" warn = True if "NCCL_IB_DISABLE" not in current_env: current_env["NCCL_IB_DISABLE"] = "1" warn = True if warn: logger.warning(message) if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]: with open(DEEPSPEED_ENVIRONMENT_NAME, "a") as f: valid_env_items = convert_dict_to_env_variables(current_env) if len(valid_env_items) > 1: f.writelines(valid_env_items) process = subprocess.Popen(cmd, env=current_env) process.wait() if process.returncode != 0: if not args.quiet: raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd) else: sys.exit(1) else: debug = getattr(args, "debug", False) args = _filter_args( args, distrib_run.get_args_parser(), ["--training_script", args.training_script, "--training_script_args", args.training_script_args], ) with patch_environment(**current_env): try: distrib_run.run(args) except Exception: if is_rich_available() and debug: console = get_console() console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]") console.print_exception(suppress=[__file__], show_locals=False) else: raise def tpu_launcher(args): import torch_xla.distributed.xla_multiprocessing as xmp if args.no_python: raise ValueError("--no_python cannot be used with TPU launcher") args, current_env = prepare_tpu(args, {}) if args.module: mod_name = args.training_script else: # Import training_script as a module script_path = Path(args.training_script) sys.path.append(str(script_path.parent.resolve())) mod_name = script_path.stem mod = importlib.import_module(mod_name) if not hasattr(mod, args.main_training_function): raise ValueError( f"Your training script should have a function named {args.main_training_function}, or you should pass a " "different value to `--main_training_function`." 
) # Patch sys.argv sys.argv = [mod.__file__] + args.training_script_args main_function = getattr(mod, args.main_training_function) with patch_environment(**current_env): xmp.spawn(PrepareForLaunch(main_function), args=(), nprocs=args.num_processes) def tpu_pod_launcher(args): from torch_xla.distributed import xla_dist current_env = {} args, current_env = prepare_tpu(args, current_env, True) debug = getattr(args, "debug", False) training_script = args.training_script training_script_args = args.training_script_args new_args = _filter_args( args, xla_dist.get_args_parser(), ["--tpu", args.tpu_name, "--positional", "", "--restart-tpuvm-pod-server"] ) if args.tpu_use_sudo: new_cmd = ["sudo"] else: new_cmd = [] new_cmd += [ "accelerate-launch", "--tpu", "--no_tpu_cluster", "--num_machines", "1", "--mixed_precision", "no", "--dynamo_backend", "no", "--num_processes", str(args.num_processes), "--main_training_function", str(args.main_training_function), training_script, ] + training_script_args new_args.positional = new_cmd bad_flags = "" for arg in vars(new_args): if arg.startswith("docker_"): value = getattr(new_args, arg) if value != "" and value is not None: bad_flags += f'{arg}="{value}"\n' if bad_flags != "": raise ValueError( f"Docker containers are not supported for TPU pod launcher currently, please remove the following flags:\n{bad_flags}" ) new_args.env = [f"{k}={v}" for k, v in current_env.items()] new_args.env.append("ACCELERATE_IN_TPU_POD=1") try: xla_dist.resolve_and_execute(new_args) except Exception: if is_rich_available() and debug: console = get_console() console.print("\n[bold red]Using --debug, `torch_xla.xla_dist` Stack Trace:[/bold red]") console.print_exception(suppress=[__file__], show_locals=False) else: raise def sagemaker_launcher(sagemaker_config: SageMakerConfig, args): if not is_sagemaker_available(): raise ImportError( "Please install sagemaker to be able to launch training on Amazon SageMaker with `pip install accelerate[sagemaker]`" ) if args.module or args.no_python: raise ValueError( "SageMaker requires a python training script file and cannot be used with --module or --no_python" ) from sagemaker.huggingface import HuggingFace args, sagemaker_inputs = prepare_sagemager_args_inputs(sagemaker_config, args) huggingface_estimator = HuggingFace(**args) huggingface_estimator.fit(inputs=sagemaker_inputs) print(f"You can find your model data at: {huggingface_estimator.model_data}") def _validate_launch_command(args): # Sanity checks if sum([args.multi_gpu, args.cpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1: raise ValueError( "You can only use one of `--cpu`, `--multi_gpu`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time." ) if args.multi_gpu and (args.num_processes is not None) and (args.num_processes < 2): raise ValueError("You need to use at least 2 processes to use `--multi_gpu`.") defaults = None warned = [] mp_from_config_flag = False # Get the default from the config file. 
if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu: defaults = load_config_from_file(args.config_file) if ( not args.multi_gpu and not args.tpu and not args.tpu_use_cluster and not args.use_deepspeed and not args.use_fsdp and not args.use_megatron_lm ): args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED args.multi_gpu = ( True if defaults.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_XPU, ) else False ) args.tpu = defaults.distributed_type == DistributedType.XLA args.use_fsdp = defaults.distributed_type == DistributedType.FSDP args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM args.tpu_use_cluster = defaults.tpu_use_cluster if args.tpu else False if args.gpu_ids is None: if defaults.gpu_ids is not None: args.gpu_ids = defaults.gpu_ids else: args.gpu_ids = "all" if args.multi_gpu and args.num_machines is None: args.num_machines = defaults.num_machines if len(args.gpu_ids.split(",")) < 2 and (args.gpu_ids != "all") and args.multi_gpu and args.num_machines <= 1: raise ValueError( "Fewer than two GPU ids were configured while trying to run on multiple GPUs. " "Please ensure at least two are specified for `--gpu_ids`, or use `--gpu_ids='all'`." ) if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE: # Update args with the defaults for name, attr in defaults.__dict__.items(): if isinstance(attr, dict): for k in defaults.deepspeed_config: setattr(args, k, defaults.deepspeed_config[k]) for k in defaults.fsdp_config: arg_to_set = k if "fsdp" not in arg_to_set: arg_to_set = "fsdp_" + arg_to_set setattr(args, arg_to_set, defaults.fsdp_config[k]) for k in defaults.megatron_lm_config: setattr(args, k, defaults.megatron_lm_config[k]) for k in defaults.dynamo_config: setattr(args, k, defaults.dynamo_config[k]) for k in defaults.ipex_config: setattr(args, k, defaults.ipex_config[k]) for k in defaults.mpirun_config: setattr(args, k, defaults.mpirun_config[k]) continue # Those args are handled separately if ( name not in ["compute_environment", "mixed_precision", "distributed_type"] and getattr(args, name, None) is None ): setattr(args, name, attr) if not args.debug: args.debug = defaults.debug if not args.mixed_precision: if defaults.mixed_precision is None: args.mixed_precision = "no" else: args.mixed_precision = defaults.mixed_precision mp_from_config_flag = True else: if args.use_cpu or (args.use_xpu and torch.xpu.is_available()): native_amp = is_torch_version(">=", "1.10") else: native_amp = is_bf16_available(True) if ( args.mixed_precision == "bf16" and not native_amp and not (args.tpu and is_torch_xla_available(check_is_tpu=True)) ): raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.") # Silently set the default here if args.dynamo_backend is None: args.dynamo_backend = "no" if args.num_processes == -1: raise ValueError("You need to manually pass in `--num_processes` using this config yaml.") else: if args.num_processes is None: if args.use_xpu and is_xpu_available(): args.num_processes = torch.xpu.device_count() elif is_mlu_available(): args.num_processes = torch.mlu.device_count() elif is_musa_available(): args.num_processes = torch.musa.device_count() elif is_npu_available(): args.num_processes = torch.npu.device_count() else: args.num_processes = torch.cuda.device_count() warned.append(f"\t`--num_processes` was set to a value of 
`{args.num_processes}`") if args.debug is None: args.debug = False if ( not args.multi_gpu and args.num_processes > 1 and ( (args.use_xpu and is_xpu_available() and torch.xpu.device_count() > 1) or (is_mlu_available() and torch.mlu.device_count() > 1) or (is_musa_available() and torch.musa.device_count() > 1) or (is_npu_available() and torch.npu.device_count() > 1) or (torch.cuda.device_count() > 1) ) ): warned.append( "\t\tMore than one GPU was found, enabling multi-GPU training.\n" "\t\tIf this was unintended please pass in `--num_processes=1`." ) args.multi_gpu = True if args.num_machines is None: warned.append("\t`--num_machines` was set to a value of `1`") args.num_machines = 1 if args.mixed_precision is None: warned.append("\t`--mixed_precision` was set to a value of `'no'`") args.mixed_precision = "no" if not hasattr(args, "use_cpu"): args.use_cpu = args.cpu if args.dynamo_backend is None: warned.append("\t`--dynamo_backend` was set to a value of `'no'`") args.dynamo_backend = "no" if args.debug: logger.debug("Running script in debug mode, expect distributed operations to be slightly slower.") is_aws_env_disabled = defaults is None or ( defaults is not None and defaults.compute_environment != ComputeEnvironment.AMAZON_SAGEMAKER ) if is_aws_env_disabled and args.num_cpu_threads_per_process is None: args.num_cpu_threads_per_process = get_int_from_env(["OMP_NUM_THREADS"], 1) if args.use_cpu and args.num_processes >= 1 and get_int_from_env(["OMP_NUM_THREADS"], 0) == 0: local_size = get_int_from_env( ["MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], max(int(args.num_processes / args.num_machines), 1), ) threads_per_process = int(psutil.cpu_count(logical=False) / local_size) if threads_per_process > 1: args.num_cpu_threads_per_process = threads_per_process warned.append( f"\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs" ) if any(warned): message = "The following values were not passed to `accelerate launch` and had defaults used instead:\n" message += "\n".join(warned) message += ( "\nTo avoid this warning pass in values for each of the problematic parameters or run `accelerate config`." ) logger.warning(message) return args, defaults, mp_from_config_flag def launch_command(args): args, defaults, mp_from_config_flag = _validate_launch_command(args) # Use the proper launcher if args.use_deepspeed and not args.cpu: args.deepspeed_fields_from_accelerate_config = list(defaults.deepspeed_config.keys()) if defaults else [] if mp_from_config_flag: args.deepspeed_fields_from_accelerate_config.append("mixed_precision") args.deepspeed_fields_from_accelerate_config = ",".join(args.deepspeed_fields_from_accelerate_config) deepspeed_launcher(args) elif args.use_fsdp and not args.cpu: multi_gpu_launcher(args) elif args.use_megatron_lm and not args.cpu: multi_gpu_launcher(args) elif args.multi_gpu and not args.cpu: multi_gpu_launcher(args) elif args.tpu and not args.cpu: if args.tpu_use_cluster: tpu_pod_launcher(args) else: tpu_launcher(args) elif defaults is not None and defaults.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: sagemaker_launcher(defaults, args) else: simple_launcher(args) def main(): parser = launch_command_parser() args = parser.parse_args() launch_command(args) if __name__ == "__main__": main()
accelerate/src/accelerate/commands/launch.py/0
{ "file_path": "accelerate/src/accelerate/commands/launch.py", "repo_id": "accelerate", "token_count": 19894 }
6
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import logging import os from .state import PartialState class MultiProcessAdapter(logging.LoggerAdapter): """ An adapter to assist with logging in multiprocess. `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes or only the main executed one. Default is `main_process_only=True`. Does not require an `Accelerator` object to be created first. """ @staticmethod def _should_log(main_process_only): "Check if log should be performed" state = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def log(self, level, msg, *args, **kwargs): """ Delegates logger call after checking if we should log. Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes or only the main executed one. Default is `True` if not passed. Also accepts `in_order`, which, if `True`, makes the processes log one by one, in order. This is much easier to read, but comes at the cost of sometimes needing to wait for the other processes. Default is `False`, so as not to break the previous behavior. `in_order` is ignored if `main_process_only` is passed. """ if PartialState._shared_state == {}: raise RuntimeError( "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." ) main_process_only = kwargs.pop("main_process_only", True) in_order = kwargs.pop("in_order", False) # set `stacklevel` to exclude ourself in `Logger.findCaller()` while respecting user's choice kwargs.setdefault("stacklevel", 2) if self.isEnabledFor(level): if self._should_log(main_process_only): msg, kwargs = self.process(msg, kwargs) self.logger.log(level, msg, *args, **kwargs) elif in_order: state = PartialState() for i in range(state.num_processes): if i == state.process_index: msg, kwargs = self.process(msg, kwargs) self.logger.log(level, msg, *args, **kwargs) state.wait_for_everyone() @functools.lru_cache(None) def warning_once(self, *args, **kwargs): """ This method is identical to `logger.warning()`, but will emit the warning with the same message only once. Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the cache. The assumption here is that all warning messages are unique across the code. If they aren't, we need to switch to another type of cache that includes the caller frame information in the hashing function. """ self.warning(*args, **kwargs) def get_logger(name: str, log_level: str = None): """ Returns a `logging.Logger` for `name` that can handle multiprocessing. If a log should be called on all processes, pass `main_process_only=False`. If a log should be called on all processes and in order, also pass `in_order=True`. Args: name (`str`): The name for the logger, such as `__file__` log_level (`str`, *optional*): The log level to use. 
If not passed, will default to the `ACCELERATE_LOG_LEVEL` environment variable, or `INFO` if not set. Example: ```python >>> from accelerate.logging import get_logger >>> from accelerate import Accelerator >>> logger = get_logger(__name__) >>> accelerator = Accelerator() >>> logger.info("My log", main_process_only=False) >>> logger.debug("My log", main_process_only=True) >>> logger = get_logger(__name__, log_level="DEBUG") >>> logger.info("My log") >>> logger.debug("My second log") >>> array = ["a", "b", "c", "d"] >>> letter_at_rank = array[accelerator.process_index] >>> logger.info(letter_at_rank, in_order=True) ``` """ if log_level is None: log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None) logger = logging.getLogger(name) if log_level is not None: logger.setLevel(log_level.upper()) logger.root.setLevel(log_level.upper()) return MultiProcessAdapter(logger, {})
accelerate/src/accelerate/logging.py/0
{ "file_path": "accelerate/src/accelerate/logging.py", "repo_id": "accelerate", "token_count": 1842 }
7
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs, PartialState class MockModel(torch.nn.Module): def __init__(self): super().__init__() torch.manual_seed(0) self.p = torch.nn.Parameter(torch.randn(40, 20)) def forward(self, x, rank): return self.p * (x ** (1 + rank)) def _run_and_get_grads(model, rank): torch.manual_seed(2024) input = torch.randn(40, 20) output = model(input, rank) output.mean().backward() param = next(model.parameters()) return param.grad def test_ddp_comm_hook(comm_hook, comm_wrapper, comm_state_option): ddp_kwargs = DistributedDataParallelKwargs( comm_hook=comm_hook, comm_wrapper=comm_wrapper, comm_state_option=comm_state_option, ) accelerator = Accelerator(kwargs_handlers=[ddp_kwargs]) model = accelerator.prepare(MockModel()) hook_grads = _run_and_get_grads(model, accelerator.local_process_index) reference_model = torch.nn.parallel.DistributedDataParallel( MockModel().to(accelerator.device), device_ids=[accelerator.local_process_index], output_device=accelerator.local_process_index, ) reference_grads = _run_and_get_grads(reference_model, accelerator.local_process_index) torch.testing.assert_close(hook_grads, reference_grads, rtol=1e-2, atol=1e-2) def main(): for comm_hook, comm_wrapper, comm_state_option in [ (DDPCommunicationHookType.NO, DDPCommunicationHookType.NO, {}), (DDPCommunicationHookType.FP16, DDPCommunicationHookType.NO, {}), (DDPCommunicationHookType.BF16, DDPCommunicationHookType.NO, {}), (DDPCommunicationHookType.POWER_SGD, DDPCommunicationHookType.NO, {}), (DDPCommunicationHookType.POWER_SGD, DDPCommunicationHookType.FP16, {}), (DDPCommunicationHookType.POWER_SGD, DDPCommunicationHookType.BF16, {}), (DDPCommunicationHookType.POWER_SGD, DDPCommunicationHookType.NO, {"matrix_approximation_rank": 2}), (DDPCommunicationHookType.BATCHED_POWER_SGD, DDPCommunicationHookType.NO, {}), (DDPCommunicationHookType.BATCHED_POWER_SGD, DDPCommunicationHookType.FP16, {}), (DDPCommunicationHookType.BATCHED_POWER_SGD, DDPCommunicationHookType.BF16, {}), ]: print(f"Test DDP comm hook: {comm_hook}, comm wrapper: {comm_wrapper}") test_ddp_comm_hook(comm_hook, comm_wrapper, comm_state_option) PartialState().destroy_process_group() if __name__ == "__main__": main()
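# This script needs a distributed launch so that `Accelerator` sees multiple processes.
# A hypothetical invocation (the script path is illustrative):
#   accelerate launch --multi_gpu --num_processes 2 test_ddp_comm_hook.py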
accelerate/src/accelerate/test_utils/scripts/test_ddp_comm_hook.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/scripts/test_ddp_comm_hook.py", "repo_id": "accelerate", "token_count": 1232 }
8
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil from pathlib import Path import torch from ..logging import get_logger from .constants import FSDP_MODEL_NAME, OPTIMIZER_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_NAME from .modeling import is_peft_model from .other import save from .versions import is_torch_version logger = get_logger(__name__) def enable_fsdp_ram_efficient_loading(): """ Enables RAM efficient loading of Hugging Face models for FSDP in the environment. """ # Sets values for `transformers.modeling_utils.is_fsdp_enabled` if "ACCELERATE_USE_FSDP" not in os.environ: os.environ["ACCELERATE_USE_FSDP"] = "True" os.environ["FSDP_CPU_RAM_EFFICIENT_LOADING"] = "True" def disable_fsdp_ram_efficient_loading(): """ Disables RAM efficient loading of Hugging Face models for FSDP in the environment. """ os.environ["FSDP_CPU_RAM_EFFICIENT_LOADING"] = "False" def _get_model_state_dict(model, adapter_only=False): if adapter_only and is_peft_model(model): from peft import get_peft_model_state_dict return get_peft_model_state_dict(model, adapter_name=model.active_adapter) else: return model.state_dict() def _set_model_state_dict(model, state_dict, adapter_only=False): if adapter_only and is_peft_model(model): from peft import set_peft_model_state_dict return set_peft_model_state_dict(model, state_dict, adapter_name=model.active_adapter) else: return model.load_state_dict(state_dict) def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0, adapter_only=False): # Note: We import here to reduce import time from general modules, and isolate outside dependencies import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultSavePlanner from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType os.makedirs(output_dir, exist_ok=True) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT # so, only enable it when num_processes>1 is_multi_process = accelerator.num_processes > 1 fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process fsdp_plugin.state_dict_config.rank0_only = is_multi_process with FSDP.state_dict_type( model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): state_dict = _get_model_state_dict(model, adapter_only=adapter_only) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: weights_name = f"{FSDP_MODEL_NAME}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}.bin" output_model_file = os.path.join(output_dir, weights_name) if accelerator.process_index == 0: logger.info(f"Saving model to {output_model_file}") torch.save(state_dict, output_model_file) logger.info(f"Model saved to {output_model_file}") elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: 
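# LOCAL_STATE_DICT: every rank saves its own (flattened) shard, so the filename below embeds the process index.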
weights_name = ( f"{FSDP_MODEL_NAME}_rank{accelerator.process_index}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin" ) output_model_file = os.path.join(output_dir, weights_name) logger.info(f"Saving model to {output_model_file}") torch.save(state_dict, output_model_file) logger.info(f"Model saved to {output_model_file}") elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: ckpt_dir = os.path.join(output_dir, f"{FSDP_MODEL_NAME}_{model_index}") os.makedirs(ckpt_dir, exist_ok=True) logger.info(f"Saving model to {ckpt_dir}") state_dict = {"model": state_dict} dist_cp.save_state_dict( state_dict=state_dict, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner(), ) logger.info(f"Model saved to {ckpt_dir}") def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0, adapter_only=False): # Note: We import here to reduce import time from general modules, and isolate outside dependencies import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType accelerator.wait_for_everyone() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT # so, only enable it when num_processes>1 is_multi_process = accelerator.num_processes > 1 fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process fsdp_plugin.state_dict_config.rank0_only = is_multi_process with FSDP.state_dict_type( model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(model) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( "Set the `sync_module_states` flag to `True` so that model states are synced across processes when " "initializing FSDP object" ) return weights_name = f"{FSDP_MODEL_NAME}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}.bin" input_model_file = os.path.join(input_dir, weights_name) logger.info(f"Loading model from {input_model_file}") state_dict = torch.load(input_model_file) logger.info(f"Model loaded from {input_model_file}") elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: weights_name = ( f"{FSDP_MODEL_NAME}_rank{accelerator.process_index}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin" ) input_model_file = os.path.join(input_dir, weights_name) logger.info(f"Loading model from {input_model_file}") state_dict = torch.load(input_model_file) logger.info(f"Model loaded from {input_model_file}") elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: ckpt_dir = ( os.path.join(input_dir, f"{FSDP_MODEL_NAME}_{model_index}") if f"{FSDP_MODEL_NAME}" not in input_dir else input_dir ) logger.info(f"Loading model from {ckpt_dir}") state_dict = {"model": _get_model_state_dict(model, adapter_only=adapter_only)} dist_cp.load_state_dict( state_dict=state_dict, storage_reader=dist_cp.FileSystemReader(ckpt_dir), planner=DefaultLoadPlanner(), ) state_dict = state_dict["model"] logger.info(f"Model loaded from {ckpt_dir}") load_result = _set_model_state_dict(model, state_dict, adapter_only=adapter_only) return load_result def 
save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0): # Note: We import here to reduce import time from general modules, and isolate outside dependencies import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultSavePlanner from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType os.makedirs(output_dir, exist_ok=True) with FSDP.state_dict_type( model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): optim_state = FSDP.optim_state_dict(model, optimizer) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: optim_state_name = ( f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin" ) output_optimizer_file = os.path.join(output_dir, optim_state_name) logger.info(f"Saving Optimizer state to {output_optimizer_file}") torch.save(optim_state, output_optimizer_file) logger.info(f"Optimizer state saved in {output_optimizer_file}") else: ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}") os.makedirs(ckpt_dir, exist_ok=True) logger.info(f"Saving Optimizer state to {ckpt_dir}") dist_cp.save_state_dict( state_dict={"optimizer": optim_state}, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner(), ) logger.info(f"Optimizer state saved in {ckpt_dir}") def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0, adapter_only=False): # Note: We import here to reduce import time from general modules, and isolate outside dependencies import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType accelerator.wait_for_everyone() with FSDP.state_dict_type( model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: optim_state = None if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: optimizer_name = ( f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin" ) input_optimizer_file = os.path.join(input_dir, optimizer_name) logger.info(f"Loading Optimizer state from {input_optimizer_file}") optim_state = torch.load(input_optimizer_file) logger.info(f"Optimizer state loaded from {input_optimizer_file}") else: ckpt_dir = ( os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}") if f"{OPTIMIZER_NAME}" not in input_dir else input_dir ) logger.info(f"Loading Optimizer from {ckpt_dir}") optim_state = load_sharded_optimizer_state_dict( model_state_dict=_get_model_state_dict(model, adapter_only=adapter_only), optimizer_key="optimizer", storage_reader=dist_cp.FileSystemReader(ckpt_dir), ) optim_state = optim_state["optimizer"] logger.info(f"Optimizer loaded from {ckpt_dir}") flattened_osd = FSDP.optim_state_dict_to_load(model=model, optim=optimizer, optim_state_dict=optim_state) optimizer.load_state_dict(flattened_osd) def _distributed_checkpoint_to_merged_weights(checkpoint_dir: str, save_path: str, safe_serialization: bool = True): """ Passthrough to 
`torch.distributed.checkpoint.format_utils.dcp_to_torch_save`. Will save under `save_path` as either `model.safetensors` or `pytorch_model.bin`. """ # Note: We import here to reduce import time from general modules, and isolate outside dependencies import torch.distributed.checkpoint as dist_cp import torch.distributed.checkpoint.format_utils as dist_cp_format_utils state_dict = {} save_path = Path(save_path) save_path.mkdir(exist_ok=True) dist_cp_format_utils._load_state_dict( state_dict, storage_reader=dist_cp.FileSystemReader(checkpoint_dir), planner=dist_cp_format_utils._EmptyStateDictLoadPlanner(), no_dist=True, ) save_path = save_path / SAFE_WEIGHTS_NAME if safe_serialization else save_path / WEIGHTS_NAME # Handle the case where the state is nested, e.g. {"model": {...}} if len(state_dict.keys()) == 1: state_dict = state_dict[list(state_dict)[0]] save(state_dict, save_path, safe_serialization=safe_serialization) return save_path def merge_fsdp_weights( checkpoint_dir: str, output_path: str, safe_serialization: bool = True, remove_checkpoint_dir: bool = False ): """ Merge the weights from sharded FSDP model checkpoints into a single combined checkpoint. Should be used if `SHARDED_STATE_DICT` was used for the model. Weights will be saved to `{output_path}/model.safetensors` if `safe_serialization` else `pytorch_model.bin`. Note: this is a CPU-bound process. Args: checkpoint_dir (`str`): The directory containing the FSDP checkpoints (can be either the model or optimizer). output_path (`str`): The path to save the merged checkpoint. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the merged weights with safetensors (recommended). remove_checkpoint_dir (`bool`, *optional*, defaults to `False`): Whether to remove the checkpoint directory after merging. """ checkpoint_dir = Path(checkpoint_dir) from accelerate.state import PartialState if not is_torch_version(">=", "2.3.0"): raise ValueError("`merge_fsdp_weights` requires PyTorch >= 2.3.0") # Verify that the checkpoint directory exists if not checkpoint_dir.exists(): model_path_exists = (checkpoint_dir / "pytorch_model_fsdp_0").exists() optimizer_path_exists = (checkpoint_dir / "optimizer_0").exists() err = f"Tried to load from {checkpoint_dir} but couldn't find a valid metadata file." if model_path_exists and optimizer_path_exists: err += " However, potential model and optimizer checkpoint directories exist." err += f" Please pass in either {checkpoint_dir}/pytorch_model_fsdp_0 or {checkpoint_dir}/optimizer_0" err += " instead." elif model_path_exists: err += " However, a potential model checkpoint directory exists." err += f" Please try passing in {checkpoint_dir}/pytorch_model_fsdp_0 instead." elif optimizer_path_exists: err += " However, a potential optimizer checkpoint directory exists." err += f" Please try passing in {checkpoint_dir}/optimizer_0 instead." raise ValueError(err) # Set up `PartialState` so that `save` works state = PartialState() if state.is_main_process: logger.info(f"Merging FSDP weights from {checkpoint_dir}") save_path = _distributed_checkpoint_to_merged_weights(checkpoint_dir, output_path, safe_serialization) logger.info(f"Successfully merged FSDP weights and saved to {save_path}") if remove_checkpoint_dir: logger.info(f"Removing old checkpoint directory {checkpoint_dir}") shutil.rmtree(checkpoint_dir) state.wait_for_everyone()
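# Example usage of `merge_fsdp_weights` defined above (the paths are hypothetical), after
# training with `fsdp_state_dict_type: SHARDED_STATE_DICT`:
#   from accelerate.utils import merge_fsdp_weights
#   merge_fsdp_weights("ckpt/pytorch_model_fsdp_0", "ckpt/merged", safe_serialization=True)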
accelerate/src/accelerate/utils/fsdp_utils.py/0
{ "file_path": "accelerate/src/accelerate/utils/fsdp_utils.py", "repo_id": "accelerate", "token_count": 6858 }
9
{ "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "bf16": { "enabled": "auto" }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "weight_decay": "auto", "torch_adam": true, "adam_w_mode": true } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } }, "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "offload_param": { "device": "cpu", "pin_memory": true }, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1e9, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": "auto" }, "gradient_accumulation_steps": 1, "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false }
accelerate/tests/deepspeed/ds_config_zero3.json/0
{ "file_path": "accelerate/tests/deepspeed/ds_config_zero3.json", "repo_id": "accelerate", "token_count": 825 }
10
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ast import os import re import shutil import tempfile import unittest from pathlib import Path from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import ( TempDirTestCase, get_launch_command, require_huggingface_suite, require_multi_device, require_multi_gpu, require_pippy, require_schedulefree, require_trackers, run_command, slow, ) from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) EXCLUDE_EXAMPLES = [ "cross_validation.py", "checkpointing.py", "gradient_accumulation.py", "local_sgd.py", "multi_process_metrics.py", "memory.py", "schedule_free.py", "tracking.py", "automatic_gradient_accumulation.py", "fsdp_with_peak_mem_tracking.py", "deepspeed_with_config_support.py", "megatron_lm_gpt_pretraining.py", "early_stopping.py", "ddp_comm_hook.py", "profiler.py", ] class ExampleDifferenceTests(unittest.TestCase): """ This TestCase checks that all of the `complete_*` scripts contain all of the information found in the `by_feature` scripts, line for line. If one fails, then a complete example does not contain all of the features in the feature scripts, and should be updated. Each example script should be a single test (such as `test_nlp_example`), and should run `one_complete_example` twice: once with `parser_only=True`, and the other with `parser_only=False`. This is so that when the test failures are returned to the user, they understand if the discrepancy lies in the `main` function, or the `training_loop` function. Otherwise it will be unclear. Also, if there are any expected differences between the base script used and `complete_nlp_example.py` (the canonical base script), these should be included in `special_strings`. These would be differences in how something is logged, print statements, etc. (such as calls to `Accelerator.log()`) """ by_feature_path = Path("examples", "by_feature").resolve() examples_path = Path("examples").resolve() def one_complete_example( self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None ): """ Tests a single `complete` example against all of the implemented `by_feature` scripts Args: complete_file_name (`str`): The filename of a complete example parser_only (`bool`): Whether to look at the main training function, or the argument parser secondary_filename (`str`, *optional*): A potential secondary base file to strip all script information not relevant for checking, such as "cv_example.py" when testing "complete_cv_example.py" special_strings (`list`, *optional*): A list of strings to potentially remove before checking no differences are left. These should be diffs that are file specific, such as different logging variations between files. 
""" self.maxDiff = None for item in os.listdir(self.by_feature_path): if item not in EXCLUDE_EXAMPLES: item_path = self.by_feature_path / item if item_path.is_file() and item_path.suffix == ".py": with self.subTest( tested_script=complete_file_name, feature_script=item, tested_section="main()" if parser_only else "training_function()", ): diff = compare_against_test( self.examples_path / complete_file_name, item_path, parser_only, secondary_filename ) diff = "\n".join(diff) if special_strings is not None: for string in special_strings: diff = diff.replace(string, "") assert diff == "" def test_nlp_examples(self): self.one_complete_example("complete_nlp_example.py", True) self.one_complete_example("complete_nlp_example.py", False) def test_cv_examples(self): cv_path = (self.examples_path / "cv_example.py").resolve() special_strings = [ " " * 16 + "{\n\n", " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n', " " * 20 + '"f1": eval_metric["f1"],\n\n', " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n', " " * 20 + '"epoch": epoch,\n\n', " " * 16 + "},\n\n", " " * 16 + "step=epoch,\n", " " * 12, " " * 8 + "for step, batch in enumerate(active_dataloader):\n", ] self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings) self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings) @mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"}) @require_huggingface_suite class FeatureExamplesTests(TempDirTestCase): clear_on_setup = False @classmethod def setUpClass(cls): super().setUpClass() cls._tmpdir = tempfile.mkdtemp() cls.config_file = Path(cls._tmpdir) / "default_config.yml" write_basic_config(save_location=cls.config_file) cls.launch_args = get_launch_command(config_file=cls.config_file) @classmethod def tearDownClass(cls): super().tearDownClass() shutil.rmtree(cls._tmpdir) def test_checkpointing_by_epoch(self): testargs = f""" examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} """.split() run_command(self.launch_args + testargs) assert (self.tmpdir / "epoch_0").exists() def test_checkpointing_by_steps(self): testargs = f""" examples/by_feature/checkpointing.py --checkpointing_steps 1 --output_dir {self.tmpdir} """.split() _ = run_command(self.launch_args + testargs) assert (self.tmpdir / "step_2").exists() def test_load_states_by_epoch(self): testargs = f""" examples/by_feature/checkpointing.py --resume_from_checkpoint {self.tmpdir / "epoch_0"} """.split() output = run_command(self.launch_args + testargs, return_stdout=True) assert "epoch 0:" not in output assert "epoch 1:" in output def test_load_states_by_steps(self): testargs = f""" examples/by_feature/checkpointing.py --resume_from_checkpoint {self.tmpdir / "step_2"} """.split() output = run_command(self.launch_args + testargs, return_stdout=True) if torch.cuda.is_available(): num_processes = torch.cuda.device_count() else: num_processes = 1 if num_processes > 1: assert "epoch 0:" not in output assert "epoch 1:" in output else: assert "epoch 0:" in output assert "epoch 1:" in output @slow def test_cross_validation(self): testargs = """ examples/by_feature/cross_validation.py --num_folds 2 """.split() with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}): output = run_command(self.launch_args + testargs, return_stdout=True) results = re.findall("({.+})", output) results = [r for r in results if "accuracy" in r][-1] results = ast.literal_eval(results) assert results["accuracy"] >= 0.75 def 
test_multi_process_metrics(self): testargs = ["examples/by_feature/multi_process_metrics.py"] run_command(self.launch_args + testargs) @require_schedulefree def test_schedulefree(self): testargs = ["examples/by_feature/schedule_free.py"] run_command(self.launch_args + testargs) @require_trackers @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"}) def test_tracking(self): with tempfile.TemporaryDirectory() as tmpdir: testargs = f""" examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} """.split() run_command(self.launch_args + testargs) assert os.path.exists(os.path.join(tmpdir, "tracking")) def test_gradient_accumulation(self): testargs = ["examples/by_feature/gradient_accumulation.py"] run_command(self.launch_args + testargs) def test_local_sgd(self): testargs = ["examples/by_feature/local_sgd.py"] run_command(self.launch_args + testargs) def test_early_stopping(self): testargs = ["examples/by_feature/early_stopping.py"] run_command(self.launch_args + testargs) def test_profiler(self): testargs = ["examples/by_feature/profiler.py"] run_command(self.launch_args + testargs) @require_multi_device def test_ddp_comm_hook(self): testargs = ["examples/by_feature/ddp_comm_hook.py", "--ddp_comm_hook", "fp16"] run_command(self.launch_args + testargs) @require_multi_device def test_distributed_inference_examples_stable_diffusion(self): testargs = ["examples/inference/distributed/stable_diffusion.py"] run_command(self.launch_args + testargs) @require_multi_device def test_distributed_inference_examples_phi2(self): testargs = ["examples/inference/distributed/phi2.py"] run_command(self.launch_args + testargs) @require_pippy @require_multi_gpu def test_pippy_examples_bert(self): testargs = ["examples/inference/pippy/bert.py"] run_command(self.launch_args + testargs) @require_pippy @require_multi_gpu def test_pippy_examples_gpt2(self): testargs = ["examples/inference/pippy/gpt2.py"] run_command(self.launch_args + testargs) @require_pippy @require_multi_gpu def test_pippy_examples_t5(self): testargs = ["examples/inference/pippy/t5.py"] run_command(self.launch_args + testargs)
accelerate/tests/test_examples.py/0
{ "file_path": "accelerate/tests/test_examples.py", "repo_id": "accelerate", "token_count": 4720 }
11
echo "hello world" echo "this is a second command"
accelerate/tests/test_samples/test_command_file.sh/0
{ "file_path": "accelerate/tests/test_samples/test_command_file.sh", "repo_id": "accelerate", "token_count": 14 }
12
#!/bin/bash # Define an array containing the base configs we wish to fine-tune configs=("zephyr" "openhermes") # Define an array of loss types loss_types=("sigmoid" "kto_pair" "ipo") # Define an array of beta values betas=("0.01" "0.1" "0.2" "0.3" "0.4" "0.5" "0.6" "0.7" "0.8" "0.9") # Outer loop over base configs for config in "${configs[@]}"; do # Middle loop over loss types for loss_type in "${loss_types[@]}"; do # Inner loop over beta values for beta in "${betas[@]}"; do # Determine the job name and model revision based on config, loss type and beta job_name="${config}_${loss_type}_beta_${beta}" model_revision="${loss_type}-${beta}" # Submit the job sbatch --job-name=${job_name} recipes/launch.slurm pref_align_scan dpo $config deepspeed_zero3 \ "--beta=${beta} --loss_type=${loss_type} --output_dir=data/$config-7b-align-scan-${loss_type}-beta-${beta} --hub_model_revision=${model_revision}" done done done
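# Hypothetical invocation from the repo root: bash recipes/pref_align_scan/launch_scan.sh
# This submits 2 configs x 3 loss types x 10 betas = 60 Slurm jobs in total.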
alignment-handbook/recipes/pref_align_scan/launch_scan.sh/0
{ "file_path": "alignment-handbook/recipes/pref_align_scan/launch_scan.sh", "repo_id": "alignment-handbook", "token_count": 430 }
13
# Scripts to Train and Evaluate Chat Models ## Fine-tuning In the handbook, we provide four main ways to align LLMs for chat: - Full fine-tuning on a multi-GPU machine with DeepSpeed ZeRO-3 (tested on an 8 x A100 (80GB) node). - LoRA or QLoRA fine-tuning on a single consumer 24GB GPU (tested on an RTX 4090). - LoRA fine-tuning on a multi-GPU machine with DeepSpeed ZeRO-3 (tested on 2 x A100s (80GB)). - QLoRA fine-tuning on a multi-GPU machine with FSDP (tested on 2 x A6000s (48GB)). In practice, we find comparable performance for both full and QLoRA fine-tuning, with the latter having the advantage of producing small adapter weights that are fast to upload and download from the Hugging Face Hub. Here are the general commands to fine-tune your models: ```shell # Full training with ZeRO-3 on 8 GPUs ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml scripts/run_{task}.py recipes/{model_name}/{task}/config_full.yaml # QLoRA 4-bit training on a single GPU ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/multi_gpu.yaml --num_processes=1 scripts/run_{task}.py recipes/{model_name}/{task}/config_qlora.yaml # LoRA training on a single GPU ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/multi_gpu.yaml --num_processes=1 scripts/run_{task}.py recipes/{model_name}/{task}/config_qlora.yaml --load_in_4bit=false # LoRA training with ZeRO-3 on two or more GPUs ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml --num_processes={num_gpus} scripts/run_{task}.py recipes/{model_name}/{task}/config_qlora.yaml --load_in_4bit=false # QLoRA training with FSDP on two or more GPUs ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/fsdp+qlora.yaml --num_processes={num_gpus} scripts/run_{task}.py recipes/{model_name}/{task}/config_qlora.yaml --torch_dtype=bfloat16 --bnb_4bit_quant_storage=bfloat16 ``` Here `{task}` refers to the type of training you wish to run. Currently, the following tasks are supported: * continued pretraining `cpt` (note that `cpt` is only present in the `gpt-nl` example recipe) * supervised finetuning `sft` * direct preference optimisation `dpo` * odds ratio preference optimisation `orpo` `{model_name}` refers to the choice of a recipe in the `recipes` directory. For example, to replicate Zephyr-7B-β you can run: ```shell # Step 1 - train SFT policy ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml scripts/run_sft.py recipes/zephyr-7b-beta/sft/config_full.yaml # Step 2 - align with DPO ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml scripts/run_dpo.py recipes/zephyr-7b-beta/dpo/config_full.yaml ``` **💡 Tip:** If you scale up/down the number of GPUs, we recommend also scaling up the per-device batch size or number of gradient accumulation steps to keep the global batch size constant (and thus replicate our results). By default, these scripts will push each model to your Hugging Face Hub username, i.e. `{username}/{model_name}-{task}`. 
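To make the batch-size tip above concrete (with hypothetical numbers): the global batch size is `num_gpus * per_device_train_batch_size * gradient_accumulation_steps`, so a recipe tuned for 8 GPUs with `per_device_train_batch_size=8` and `gradient_accumulation_steps=1` (global batch size 64) would need `gradient_accumulation_steps=2` to preserve the same global batch size when run on 4 GPUs.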
You can override the parameters in each YAML config by appending them to the command as follows:

```shell
# Change batch size, number of epochs etc
ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml scripts/run_{task}.py recipes/{model_name}/{task}/config_full.yaml --per_device_train_batch_size=42 --num_train_epochs=5
```

## Logging with Weights and Biases

By default, all training metrics are logged with TensorBoard. If you have a [Weights and Biases](https://wandb.ai/site) account and are logged in, you can view the training metrics by appending `--report_to=wandb`, e.g.

```shell
ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml scripts/run_{task}.py recipes/{model_name}/{task}/config_full.yaml --report_to=wandb
```

## Launching jobs on a Slurm cluster

If you have access to a Slurm cluster, we provide a `recipes/launch.slurm` script that will automatically queue training jobs for you. Here's how you can use it:

```shell
sbatch --job-name=handbook_{task} --nodes=1 recipes/launch.slurm {model_name} {task} {precision} {accelerator}
```

Here `{model_name}` and `{task}` are defined as above, while `{precision}` refers to the type of training (`full` vs `qlora`) and `{accelerator}` refers to the choice of 🤗 Accelerate config in `recipes/accelerate_configs`. If you wish to override the default config parameters, you can provide them by appending a space-separated string like `'--arg1=value1 --arg2=value2'`. Here's a concrete example to run SFT on 1 node of 8 GPUs:

```shell
# Launch on Slurm and override default hyperparameters
sbatch --job-name=handbook_sft --nodes=1 recipes/launch.slurm zephyr-7b-beta sft full deepspeed_zero3 '--per_device_train_batch_size=42 --num_train_epochs=5'
```

You can scale the number of nodes by increasing the `--nodes` flag.

**⚠️ Note:** the configuration in `recipes/launch.slurm` is optimised for the Hugging Face Compute Cluster and may require tweaking to be adapted to your own compute nodes.

## Fine-tuning on your datasets

Under the hood, each training script uses the `get_datasets()` function which allows one to easily combine multiple datasets with varying proportions. For instance, this is how one can specify multiple datasets and which splits to combine in one of the YAML configs:

```yaml
datasets_mixer:
  dataset_1: 0.5 # Use 50% of the training examples
  dataset_2: 0.66 # Use 66% of the training examples
  dataset_3: 0.10 # Use 10% of the training examples
dataset_splits:
- train_xxx # The training splits to mix
- test_xxx # The test splits to mix
```

If you want to fine-tune on your datasets, the main thing to keep in mind is how the chat templates are applied to the dataset blend. Since each task (SFT, DPO, ORPO, etc.) requires a different format, we assume the datasets have the following columns:

**SFT**

* `messages`: A list of `dicts` in the form `{"role": "{role}", "content": {content}}`.
* See [ultrachat_200k](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k) for an example.

**DPO and ORPO**

* `chosen`: A list of `dicts` in the form `{"role": "{role}", "content": {content}}` corresponding to the preferred dialogue.
* `rejected`: A list of `dicts` in the form `{"role": "{role}", "content": {content}}` corresponding to the dispreferred dialogue.
* See [ultrafeedback_binarized](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized) for an example.

We also find it useful to include dedicated splits per task in our datasets, so e.g. we have: * `{train,test}_sft`: Splits for SFT training. * `{train,test}_gen`: Splits for generation ranking like rejection sampling or PPO. * `{train,test}_prefs`: Splits for preference modelling, like reward modelling or DPO. If you format your dataset in the same way, our training scripts should work out of the box! ## Evaluating chat models We recommend benchmarking chat models on: * [MT-Bench](https://huggingface.co/spaces/lmsys/mt-bench): a multi-turn benchmark spanning 80 dialogues and 10 domains. * [AlpacaEval](https://github.com/tatsu-lab/alpaca_eval): a single-turn benchmark that evaluates the helpfulness of chat and instruct models against `text-davinci-003`. For both benchmarks, we have added support for the [Zephyr chat template](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full/blob/ac6e600eefcce74f5e8bae1035d4f66019e93190/tokenizer_config.json#L30) (which is the default produced by our scripts), so you can evaluate models produced by our scripts as follows: **MT-Bench** * Follow the installation instructions [here](https://github.com/lm-sys/FastChat/tree/main/fastchat/llm_judge) * Make sure the word `zephyr` exists in the `--model-path` argument when generating the model responses [here](https://github.com/lm-sys/FastChat/tree/main/fastchat/llm_judge#step-1-generate-model-answers-to-mt-bench-questions). This will ensure the correct chat template is loaded. For example, the following model name is valid: `--model-path {hub_username}/my-baby-zephyr` * Generate the model responses and GPT-4 rankings. **AlpacaEval** * Follow the installation instructions [here](https://github.com/tatsu-lab/alpaca_eval#quick-start) * Copy-paste the [config](https://github.com/tatsu-lab/alpaca_eval/blob/main/src/alpaca_eval/models_configs/zephyr-7b-beta/configs.yaml) for `zephyr-7b-beta` and place it in the `model_configs` directory under `{your_zephyr_model}`. * Next, update the [config name](https://github.com/tatsu-lab/alpaca_eval/blob/2daa6e11b194653043ca74f735728dc068e04aae/src/alpaca_eval/models_configs/zephyr-7b-beta/configs.yaml#L1) and [Hub model ID](https://github.com/tatsu-lab/alpaca_eval/blob/2daa6e11b194653043ca74f735728dc068e04aae/src/alpaca_eval/models_configs/zephyr-7b-beta/configs.yaml#L5) to match your model name. * Follow the steps to evaluate your model [here](https://github.com/tatsu-lab/alpaca_eval/tree/main#evaluating-a-model). Note that MT-Bench and AlpacaEval rely on LLMs like GPT-4 to judge the quality of the model responses, and thus the ranking exhibits various biases including a preference for models distilled from GPTs. For that reason, we also recommend submitting your best models for human evaluation in: * [Chatbot Arena](https://chat.lmsys.org): a live, human evaluation of chat models in head-to-head comparisons.
alignment-handbook/scripts/README.md/0
{ "file_path": "alignment-handbook/scripts/README.md", "repo_id": "alignment-handbook", "token_count": 3136 }
14
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest

from alignment import DataArguments, H4ArgumentParser, ModelArguments, SFTConfig


class H4ArgumentParserTest(unittest.TestCase):
    def setUp(self):
        self.parser = H4ArgumentParser((ModelArguments, DataArguments, SFTConfig))
        self.yaml_file_path = "tests/fixtures/config_sft_full.yaml"

    def test_load_yaml(self):
        model_args, data_args, training_args = self.parser.parse_yaml_file(os.path.abspath(self.yaml_file_path))
        self.assertEqual(model_args.model_name_or_path, "mistralai/Mistral-7B-v0.1")

    def test_load_yaml_and_args(self):
        command_line_args = [
            "--model_name_or_path=test",
            "--use_peft=true",
            "--lora_r=16",
            "--lora_dropout=0.5",
        ]
        model_args, data_args, training_args = self.parser.parse_yaml_and_args(
            os.path.abspath(self.yaml_file_path), command_line_args
        )
        self.assertEqual(model_args.model_name_or_path, "test")
        self.assertEqual(model_args.use_peft, True)
        self.assertEqual(model_args.lora_r, 16)
        self.assertEqual(model_args.lora_dropout, 0.5)
alignment-handbook/tests/test_configs.py/0
{ "file_path": "alignment-handbook/tests/test_configs.py", "repo_id": "alignment-handbook", "token_count": 697 }
15
# candle
[![discord server](https://dcbadge.vercel.app/api/server/hugging-face-879548962464493619)](https://discord.gg/hugging-face-879548962464493619)
[![Latest version](https://img.shields.io/crates/v/candle-core.svg)](https://crates.io/crates/candle-core)
[![Documentation](https://docs.rs/candle-core/badge.svg)](https://docs.rs/candle-core)
![License](https://img.shields.io/crates/l/candle-core.svg)

Candle is a minimalist ML framework for Rust with a focus on performance (including GPU support) and ease of use. Try our online demos:
[whisper](https://huggingface.co/spaces/lmz/candle-whisper),
[LLaMA2](https://huggingface.co/spaces/lmz/candle-llama2),
[T5](https://huggingface.co/spaces/radames/Candle-T5-Generation-Wasm),
[yolo](https://huggingface.co/spaces/lmz/candle-yolo),
[Segment Anything](https://huggingface.co/spaces/radames/candle-segment-anything-wasm).

## Get started

Make sure that you have [`candle-core`](https://github.com/huggingface/candle/tree/main/candle-core) correctly installed as described in [**Installation**](https://huggingface.github.io/candle/guide/installation.html).

Let's see how to run a simple matrix multiplication. Write the following to your `myapp/src/main.rs` file:

```rust
use candle_core::{Device, Tensor};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let device = Device::Cpu;
    let a = Tensor::randn(0f32, 1., (2, 3), &device)?;
    let b = Tensor::randn(0f32, 1., (3, 4), &device)?;

    let c = a.matmul(&b)?;
    println!("{c}");
    Ok(())
}
```

`cargo run` should display a tensor of shape `Tensor[[2, 4], f32]`.

Having installed `candle` with Cuda support, simply define the `device` to be on GPU:

```diff
- let device = Device::Cpu;
+ let device = Device::new_cuda(0)?;
```

For more advanced examples, please have a look at the following section.

## Check out our examples

These online demos run entirely in your browser:
- [yolo](https://huggingface.co/spaces/lmz/candle-yolo): pose estimation and object recognition.
- [whisper](https://huggingface.co/spaces/lmz/candle-whisper): speech recognition.
- [LLaMA2](https://huggingface.co/spaces/lmz/candle-llama2): text generation.
- [T5](https://huggingface.co/spaces/radames/Candle-T5-Generation-Wasm): text generation.
- [Phi-1.5, and Phi-2](https://huggingface.co/spaces/radames/Candle-Phi-1.5-Wasm): text generation.
- [Segment Anything Model](https://huggingface.co/spaces/radames/candle-segment-anything-wasm): Image segmentation.
- [BLIP](https://huggingface.co/spaces/radames/Candle-BLIP-Image-Captioning): image captioning.

We also provide some command line based examples using state-of-the-art models:

- [LLaMA v1, v2, and v3](./candle-examples/examples/llama/): general LLM, includes the SOLAR-10.7B variant.
- [Falcon](./candle-examples/examples/falcon/): general LLM.
- [Codegeex4](./candle-examples/examples/codegeex4-9b/): code completion, code interpreter, web search, function calling, repository-level tasks.
- [GLM4](./candle-examples/examples/glm4/): Open Multilingual Multimodal Chat LMs by THUDM.
- [Gemma v1 and v2](./candle-examples/examples/gemma/): 2b and 7b+/9b general LLMs from Google Deepmind.
- [RecurrentGemma](./candle-examples/examples/recurrent-gemma/): 2b and 7b Griffin based models from Google that mix attention with an RNN-like state.
- [Phi-1, Phi-1.5, Phi-2, and Phi-3](./candle-examples/examples/phi/): 1.3b, 2.7b, and 3.8b general LLMs with performance on par with 7b models.
- [StableLM-3B-4E1T](./candle-examples/examples/stable-lm/): a 3b general LLM pre-trained on 1T tokens of English and code datasets.
Also supports StableLM-2, a 1.6b LLM trained on 2T tokens, as well as the code variants. - [Mamba](./candle-examples/examples/mamba/): an inference only implementation of the Mamba state space model. - [Mistral7b-v0.1](./candle-examples/examples/mistral/): a 7b general LLM with better performance than all publicly available 13b models as of 2023-09-28. - [Mixtral8x7b-v0.1](./candle-examples/examples/mixtral/): a sparse mixture of experts 8x7b general LLM with better performance than a Llama 2 70B model with much faster inference. - [StarCoder](./candle-examples/examples/bigcode/) and [StarCoder2](./candle-examples/examples/starcoder2/): LLM specialized to code generation. - [Qwen1.5](./candle-examples/examples/qwen/): Bilingual (English/Chinese) LLMs. - [RWKV v5 and v6](./candle-examples/examples/rwkv/): An RNN with transformer level LLM performance. - [Replit-code-v1.5](./candle-examples/examples/replit-code/): a 3.3b LLM specialized for code completion. - [Yi-6B / Yi-34B](./candle-examples/examples/yi/): two bilingual (English/Chinese) general LLMs with 6b and 34b parameters. - [Quantized LLaMA](./candle-examples/examples/quantized/): quantized version of the LLaMA model using the same quantization techniques as [llama.cpp](https://github.com/ggerganov/llama.cpp). <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/quantized/assets/aoc.gif" width="600"> - [Stable Diffusion](./candle-examples/examples/stable-diffusion/): text to image generative model, support for the 1.5, 2.1, SDXL 1.0 and Turbo versions. <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg" width="200"> - [Wuerstchen](./candle-examples/examples/wuerstchen/): another text to image generative model. <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/wuerstchen/assets/cat.jpg" width="200"> - [yolo-v3](./candle-examples/examples/yolo-v3/) and [yolo-v8](./candle-examples/examples/yolo-v8/): object detection and pose estimation models. <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/yolo-v8/assets/bike.od.jpg" width="200"><img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/yolo-v8/assets/bike.pose.jpg" width="200"> - [segment-anything](./candle-examples/examples/segment-anything/): image segmentation model with prompt. <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/segment-anything/assets/sam_merged.jpg" width="200"> - [SegFormer](./candle-examples/examples/segformer/): transformer based semantic segmentation model. - [Whisper](./candle-examples/examples/whisper/): speech recognition model. - [EnCodec](./candle-examples/examples/encodec/): high-quality audio compression model using residual vector quantization. - [MetaVoice](./candle-examples/examples/metavoice/): foundational model for text-to-speech. - [Parler-TTS](./candle-examples/examples/parler-tts/): large text-to-speech model. - [T5](./candle-examples/examples/t5), [Bert](./candle-examples/examples/bert/), [JinaBert](./candle-examples/examples/jina-bert/) : useful for sentence embeddings. - [DINOv2](./candle-examples/examples/dinov2/): computer vision model trained using self-supervision (can be used for imagenet classification, depth evaluation, segmentation). - [VGG](./candle-examples/examples/vgg/), [RepVGG](./candle-examples/examples/repvgg): computer vision models. 
- [BLIP](./candle-examples/examples/blip/): image to text model, can be used to generate captions for an image. - [CLIP](./candle-examples/examples/clip/): multi-model vision and language model. - [TrOCR](./candle-examples/examples/trocr/): a transformer OCR model, with dedicated submodels for hand-writing and printed recognition. - [Marian-MT](./candle-examples/examples/marian-mt/): neural machine translation model, generates the translated text from the input text. - [Moondream](./candle-examples/examples/moondream/): tiny computer-vision model that can answer real-world questions about images. Run them using commands like: ``` cargo run --example quantized --release ``` In order to use **CUDA** add `--features cuda` to the example command line. If you have cuDNN installed, use `--features cudnn` for even more speedups. There are also some wasm examples for whisper and [llama2.c](https://github.com/karpathy/llama2.c). You can either build them with `trunk` or try them online: [whisper](https://huggingface.co/spaces/lmz/candle-whisper), [llama2](https://huggingface.co/spaces/lmz/candle-llama2), [T5](https://huggingface.co/spaces/radames/Candle-T5-Generation-Wasm), [Phi-1.5, and Phi-2](https://huggingface.co/spaces/radames/Candle-Phi-1.5-Wasm), [Segment Anything Model](https://huggingface.co/spaces/radames/candle-segment-anything-wasm). For LLaMA2, run the following command to retrieve the weight files and start a test server: ```bash cd candle-wasm-examples/llama2-c wget https://huggingface.co/spaces/lmz/candle-llama2/resolve/main/model.bin wget https://huggingface.co/spaces/lmz/candle-llama2/resolve/main/tokenizer.json trunk serve --release --port 8081 ``` And then head over to [http://localhost:8081/](http://localhost:8081/). <!--- ANCHOR: useful_libraries ---> ## Useful External Resources - [`candle-tutorial`](https://github.com/ToluClassics/candle-tutorial): A very detailed tutorial showing how to convert a PyTorch model to Candle. - [`candle-lora`](https://github.com/EricLBuehler/candle-lora): Efficient and ergonomic LoRA implementation for Candle. `candle-lora` has out-of-the-box LoRA support for many models from Candle, which can be found [here](https://github.com/EricLBuehler/candle-lora/tree/master/candle-lora-transformers/examples). - [`optimisers`](https://github.com/KGrewal1/optimisers): A collection of optimisers including SGD with momentum, AdaGrad, AdaDelta, AdaMax, NAdam, RAdam, and RMSprop. - [`candle-vllm`](https://github.com/EricLBuehler/candle-vllm): Efficient platform for inference and serving local LLMs including an OpenAI compatible API server. - [`candle-ext`](https://github.com/mokeyish/candle-ext): An extension library to Candle that provides PyTorch functions not currently available in Candle. - [`candle-coursera-ml`](https://github.com/vishpat/candle-coursera-ml): Implementation of ML algorithms from Coursera's [Machine Learning Specialization](https://www.coursera.org/specializations/machine-learning-introduction) course. - [`kalosm`](https://github.com/floneum/floneum/tree/master/interfaces/kalosm): A multi-modal meta-framework in Rust for interfacing with local pre-trained models with support for controlled generation, custom samplers, in-memory vector databases, audio transcription, and more. - [`candle-sampling`](https://github.com/EricLBuehler/candle-sampling): Sampling techniques for Candle. 
- [`gpt-from-scratch-rs`](https://github.com/jeroenvlek/gpt-from-scratch-rs): A port of Andrej Karpathy's _Let's build GPT_ tutorial on YouTube showcasing the Candle API on a toy problem. - [`candle-einops`](https://github.com/tomsanbear/candle-einops): A pure rust implementation of the python [einops](https://github.com/arogozhnikov/einops) library. If you have an addition to this list, please submit a pull request. <!--- ANCHOR_END: useful_libraries ---> <!--- ANCHOR: features ---> ## Features - Simple syntax, looks and feels like PyTorch. - Model training. - Embed user-defined ops/kernels, such as [flash-attention v2](https://github.com/huggingface/candle/blob/89ba005962495f2bfbda286e185e9c3c7f5300a3/candle-flash-attn/src/lib.rs#L152). - Backends. - Optimized CPU backend with optional MKL support for x86 and Accelerate for macs. - CUDA backend for efficiently running on GPUs, multiple GPU distribution via NCCL. - WASM support, run your models in a browser. - Included models. - Language Models. - LLaMA v1, v2, and v3 with variants such as SOLAR-10.7B. - Falcon. - StarCoder, StarCoder2. - Phi 1, 1.5, 2, and 3. - Mamba, Minimal Mamba - Gemma v1 2b and 7b+, v2 2b and 9b. - Mistral 7b v0.1. - Mixtral 8x7b v0.1. - StableLM-3B-4E1T, StableLM-2-1.6B, Stable-Code-3B. - Replit-code-v1.5-3B. - Bert. - Yi-6B and Yi-34B. - Qwen1.5, Qwen1.5 MoE. - RWKV v5 and v6. - Quantized LLMs. - Llama 7b, 13b, 70b, as well as the chat and code variants. - Mistral 7b, and 7b instruct. - Mixtral 8x7b. - Zephyr 7b a and b (Mistral-7b based). - OpenChat 3.5 (Mistral-7b based). - Text to text. - T5 and its variants: FlanT5, UL2, MADLAD400 (translation), CoEdit (Grammar correction). - Marian MT (Machine Translation). - Text to image. - Stable Diffusion v1.5, v2.1, XL v1.0. - Wurstchen v2. - Image to text. - BLIP. - TrOCR. - Audio. - Whisper, multi-lingual speech-to-text. - EnCodec, audio compression model. - MetaVoice-1B, text-to-speech model. - Parler-TTS, text-to-speech model. - Computer Vision Models. - DINOv2, ConvMixer, EfficientNet, ResNet, ViT, VGG, RepVGG, ConvNeXT, ConvNeXTv2, MobileOne, EfficientVit (MSRA), MobileNetv4, Hiera, FastViT. - yolo-v3, yolo-v8. - Segment-Anything Model (SAM). - SegFormer. - File formats: load models from safetensors, npz, ggml, or PyTorch files. - Serverless (on CPU), small and fast deployments. - Quantization support using the llama.cpp quantized types. 
<!--- ANCHOR_END: features ---> ## How to use <!--- ANCHOR: cheatsheet ---> Cheatsheet: | | Using PyTorch | Using Candle | |------------|------------------------------------------|------------------------------------------------------------------| | Creation | `torch.Tensor([[1, 2], [3, 4]])` | `Tensor::new(&[[1f32, 2.], [3., 4.]], &Device::Cpu)?` | | Creation | `torch.zeros((2, 2))` | `Tensor::zeros((2, 2), DType::F32, &Device::Cpu)?` | | Indexing | `tensor[:, :4]` | `tensor.i((.., ..4))?` | | Operations | `tensor.view((2, 2))` | `tensor.reshape((2, 2))?` | | Operations | `a.matmul(b)` | `a.matmul(&b)?` | | Arithmetic | `a + b` | `&a + &b` | | Device | `tensor.to(device="cuda")` | `tensor.to_device(&Device::new_cuda(0)?)?` | | Dtype | `tensor.to(dtype=torch.float16)` | `tensor.to_dtype(&DType::F16)?` | | Saving | `torch.save({"A": A}, "model.bin")` | `candle::safetensors::save(&HashMap::from([("A", A)]), "model.safetensors")?` | | Loading | `weights = torch.load("model.bin")` | `candle::safetensors::load("model.safetensors", &device)` | <!--- ANCHOR_END: cheatsheet ---> ## Structure - [candle-core](./candle-core): Core ops, devices, and `Tensor` struct definition - [candle-nn](./candle-nn/): Tools to build real models - [candle-examples](./candle-examples/): Examples of using the library in realistic settings - [candle-kernels](./candle-kernels/): CUDA custom kernels - [candle-datasets](./candle-datasets/): Datasets and data loaders. - [candle-transformers](./candle-transformers): transformers-related utilities. - [candle-flash-attn](./candle-flash-attn): Flash attention v2 layer. - [candle-onnx](./candle-onnx/): ONNX model evaluation. ## FAQ ### Why should I use Candle? Candle's core goal is to *make serverless inference possible*. Full machine learning frameworks like PyTorch are very large, which makes creating instances on a cluster slow. Candle allows deployment of lightweight binaries. Secondly, Candle lets you *remove Python* from production workloads. Python overhead can seriously hurt performance, and the [GIL](https://www.backblaze.com/blog/the-python-gil-past-present-and-future/) is a notorious source of headaches. Finally, Rust is cool! A lot of the HF ecosystem already has Rust crates, like [safetensors](https://github.com/huggingface/safetensors) and [tokenizers](https://github.com/huggingface/tokenizers). ### Other ML frameworks - [dfdx](https://github.com/coreylowman/dfdx) is a formidable crate, with shapes being included in types. This prevents a lot of headaches by getting the compiler to complain about shape mismatches right off the bat. However, we found that some features still require nightly, and writing code can be a bit daunting for non rust experts. We're leveraging and contributing to other core crates for the runtime so hopefully both crates can benefit from each other. - [burn](https://github.com/burn-rs/burn) is a general crate that can leverage multiple backends so you can choose the best engine for your workload. - [tch-rs](https://github.com/LaurentMazare/tch-rs.git) Bindings to the torch library in Rust. Extremely versatile, but they bring in the entire torch library into the runtime. The main contributor of `tch-rs` is also involved in the development of `candle`. ### Common Errors #### Missing symbols when compiling with the mkl feature. If you get some missing symbols when compiling binaries/tests using the mkl or accelerate features, e.g. 
for mkl you get: ``` = note: /usr/bin/ld: (....o): in function `blas::sgemm': .../blas-0.22.0/src/lib.rs:1944: undefined reference to `sgemm_' collect2: error: ld returned 1 exit status = note: some `extern` functions couldn't be found; some native libraries may need to be installed or have their path specified = note: use the `-l` flag to specify native libraries to link = note: use the `cargo:rustc-link-lib` directive to specify the native libraries to link with Cargo ``` or for accelerate: ``` Undefined symbols for architecture arm64: "_dgemm_", referenced from: candle_core::accelerate::dgemm::h1b71a038552bcabe in libcandle_core... "_sgemm_", referenced from: candle_core::accelerate::sgemm::h2cf21c592cba3c47 in libcandle_core... ld: symbol(s) not found for architecture arm64 ``` This is likely due to a missing linker flag that was needed to enable the mkl library. You can try adding the following for mkl at the top of your binary: ```rust extern crate intel_mkl_src; ``` or for accelerate: ```rust extern crate accelerate_src; ``` #### Cannot run the LLaMA examples: access to source requires login credentials ``` Error: request error: https://huggingface.co/meta-llama/Llama-2-7b-hf/resolve/main/tokenizer.json: status code 401 ``` This is likely because you're not permissioned for the LLaMA-v2 model. To fix this, you have to register on the huggingface-hub, accept the [LLaMA-v2 model conditions](https://huggingface.co/meta-llama/Llama-2-7b-hf), and set up your authentication token. See issue [#350](https://github.com/huggingface/candle/issues/350) for more details. #### Missing cute/cutlass headers when compiling flash-attn ``` In file included from kernels/flash_fwd_launch_template.h:11:0, from kernels/flash_fwd_hdim224_fp16_sm80.cu:5: kernels/flash_fwd_kernel.h:8:10: fatal error: cute/algorithm/copy.hpp: No such file or directory #include <cute/algorithm/copy.hpp> ^~~~~~~~~~~~~~~~~~~~~~~~~ compilation terminated. Error: nvcc error while compiling: ``` [cutlass](https://github.com/NVIDIA/cutlass) is provided as a git submodule so you may want to run the following command to check it in properly. ```bash git submodule update --init ``` #### Compiling with flash-attention fails ``` /usr/include/c++/11/bits/std_function.h:530:146: error: parameter packs not expanded with ‘...’: ``` This is a bug in gcc-11 triggered by the Cuda compiler. To fix this, install a different, supported gcc version - for example gcc-10, and specify the path to the compiler in the NVCC_CCBIN environment variable. ``` env NVCC_CCBIN=/usr/lib/gcc/x86_64-linux-gnu/10 cargo ... ``` #### Linking error on windows when running rustdoc or mdbook tests ``` Couldn't compile the test. 
---- .\candle-book\src\inference\hub.md - Using_the_hub::Using_in_a_real_model_ (line 50) stdout ---- error: linking with `link.exe` failed: exit code: 1181 //very long chain of linking = note: LINK : fatal error LNK1181: cannot open input file 'windows.0.48.5.lib' ``` Make sure you link all native libraries that might be located outside a project target, e.g., to run mdbook tests, you should run: ``` mdbook test candle-book -L .\target\debug\deps\ ` -L native=$env:USERPROFILE\.cargo\registry\src\index.crates.io-6f17d22bba15001f\windows_x86_64_msvc-0.42.2\lib ` -L native=$env:USERPROFILE\.cargo\registry\src\index.crates.io-6f17d22bba15001f\windows_x86_64_msvc-0.48.5\lib ``` #### Extremely slow model load time with WSL This may be caused by the models being loaded from `/mnt/c`, more details on [stackoverflow](https://stackoverflow.com/questions/68972448/why-is-wsl-extremely-slow-when-compared-with-native-windows-npm-yarn-processing). #### Tracking down errors You can set `RUST_BACKTRACE=1` to be provided with backtraces when a candle error is generated. #### CudaRC error If you encounter an error like this one `called `Result::unwrap()` on an `Err` value: LoadLibraryExW { source: Os { code: 126, kind: Uncategorized, message: "The specified module could not be found." } }` on windows. To fix copy and rename these 3 files (make sure they are in path). The paths depend on your cuda version. `c:\Windows\System32\nvcuda.dll` -> `cuda.dll` `c:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\bin\cublas64_12.dll` -> `cublas.dll` `c:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\bin\curand64_10.dll` -> `curand.dll`
candle/README.md/0
{ "file_path": "candle/README.md", "repo_id": "candle", "token_count": 8199 }
16
# PyTorch cheatsheet

{{#include ../../../README.md:cheatsheet}}
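
Below is a small, self-contained sketch (not part of the included cheatsheet) that exercises a few of the tabulated operations on the CPU; the imports and `main` signature mirror the matrix-multiplication example in the main README.

```rust
use candle_core::{DType, Device, Tensor};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let device = Device::Cpu;
    // Creation: the Candle equivalent of `torch.Tensor([[1, 2], [3, 4]])`.
    let a = Tensor::new(&[[1f32, 2.], [3., 4.]], &device)?;
    // Creation: the equivalent of `torch.zeros((2, 2))`.
    let b = Tensor::zeros((2, 2), DType::F32, &device)?;
    // Operations: reshape and matmul, mirroring `view` and `matmul` in PyTorch.
    let c = a.reshape((2, 2))?.matmul(&b)?;
    // Arithmetic on references, the equivalent of `a + b`.
    let d = (&a + &c)?;
    println!("{d}");
    Ok(())
}
```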
candle/candle-book/src/guide/cheatsheet.md/0
{ "file_path": "candle/candle-book/src/guide/cheatsheet.md", "repo_id": "candle", "token_count": 26 }
17
use crate::op::{BinaryOpT, CmpOp, ReduceOp, UnaryOpT}; use crate::{CpuStorage, DType, Layout, Result, Shape}; pub trait BackendStorage: Sized { type Device: BackendDevice; fn try_clone(&self, _: &Layout) -> Result<Self>; fn dtype(&self) -> DType; fn device(&self) -> &Self::Device; // Maybe this should return a Cow instead so that no copy is done on the cpu case. fn to_cpu_storage(&self) -> Result<CpuStorage>; fn affine(&self, _: &Layout, _: f64, _: f64) -> Result<Self>; fn powf(&self, _: &Layout, _: f64) -> Result<Self>; fn elu(&self, _: &Layout, _: f64) -> Result<Self>; fn reduce_op(&self, _: ReduceOp, _: &Layout, _: &[usize]) -> Result<Self>; fn cmp(&self, _: CmpOp, _: &Self, _: &Layout, _: &Layout) -> Result<Self>; fn to_dtype(&self, _: &Layout, _: DType) -> Result<Self>; fn unary_impl<B: UnaryOpT>(&self, _: &Layout) -> Result<Self>; fn binary_impl<B: BinaryOpT>(&self, _: &Self, _: &Layout, _: &Layout) -> Result<Self>; fn where_cond(&self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout) -> Result<Self>; fn conv1d( &self, _l: &Layout, _kernel: &Self, _kernel_l: &Layout, _params: &crate::conv::ParamsConv1D, ) -> Result<Self>; fn conv_transpose1d( &self, _l: &Layout, _kernel: &Self, _kernel_l: &Layout, _params: &crate::conv::ParamsConvTranspose1D, ) -> Result<Self>; fn conv2d( &self, _l: &Layout, _kernel: &Self, _kernel_l: &Layout, _params: &crate::conv::ParamsConv2D, ) -> Result<Self>; fn conv_transpose2d( &self, _l: &Layout, _kernel: &Self, _kernel_l: &Layout, _params: &crate::conv::ParamsConvTranspose2D, ) -> Result<Self>; fn avg_pool2d(&self, _: &Layout, _: (usize, usize), _: (usize, usize)) -> Result<Self>; fn max_pool2d(&self, _: &Layout, _: (usize, usize), _: (usize, usize)) -> Result<Self>; fn upsample_nearest1d(&self, _: &Layout, _: usize) -> Result<Self>; fn upsample_nearest2d(&self, _: &Layout, _: usize, _: usize) -> Result<Self>; fn gather(&self, _: &Layout, _: &Self, _: &Layout, _: usize) -> Result<Self>; fn scatter_add( &self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout, _: usize, ) -> Result<Self>; fn index_select(&self, _: &Self, _: &Layout, _: &Layout, _: usize) -> Result<Self>; fn index_add( &self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout, _: usize, ) -> Result<Self>; fn matmul( &self, _: &Self, _: (usize, usize, usize, usize), _: &Layout, _: &Layout, ) -> Result<Self>; fn copy_strided_src(&self, _: &mut Self, _: usize, _: &Layout) -> Result<()>; #[allow(clippy::too_many_arguments)] // Similar to cudaMemcpy2D, though values are in elements and not in bytes. fn copy2d( &self, _: &mut Self, _d1: usize, _d2: usize, _src_stride1: usize, _dst_stride1: usize, _src_offset: usize, _dst_offset: usize, ) -> Result<()>; } pub trait BackendDevice: Sized + std::fmt::Debug + Clone { type Storage: BackendStorage; // TODO: Make the usize generic and part of a generic DeviceLocation. fn new(_: usize) -> Result<Self>; fn location(&self) -> crate::DeviceLocation; fn same_device(&self, _: &Self) -> bool; fn zeros_impl(&self, _shape: &Shape, _dtype: DType) -> Result<Self::Storage>; fn ones_impl(&self, _shape: &Shape, _dtype: DType) -> Result<Self::Storage>; /// # Safety /// This function is unsafe as it doesn't initialize the underlying data store. /// The caller should ensure that the data is properly initialized as early as possible /// after this call. 
unsafe fn alloc_uninit(&self, _shape: &Shape, _dtype: DType) -> Result<Self::Storage>; fn storage_from_slice<T: crate::WithDType>(&self, _: &[T]) -> Result<Self::Storage>; fn storage_from_cpu_storage(&self, _: &CpuStorage) -> Result<Self::Storage>; fn storage_from_cpu_storage_owned(&self, _: CpuStorage) -> Result<Self::Storage>; fn rand_uniform(&self, _: &Shape, _: DType, _: f64, _: f64) -> Result<Self::Storage>; fn rand_normal(&self, _: &Shape, _: DType, _: f64, _: f64) -> Result<Self::Storage>; fn set_seed(&self, _: u64) -> Result<()>; /// Synchronize should block until all the operations on the device are completed. fn synchronize(&self) -> Result<()>; }
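
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original trait definitions): a small
// generic helper showing how the two traits compose. Any concrete backend
// storage exposes its device through `BackendStorage::device` and can be
// brought back to host memory via `to_cpu_storage`. The helper name is
// hypothetical and only meant as an example of writing backend-agnostic code.
// ---------------------------------------------------------------------------
#[allow(dead_code)]
fn describe_storage<S: BackendStorage>(storage: &S) -> Result<(DType, crate::DeviceLocation)> {
    // Round-trip through the CPU to make sure the data is reachable from host memory.
    let _cpu: CpuStorage = storage.to_cpu_storage()?;
    Ok((storage.dtype(), storage.device().location()))
}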
candle/candle-core/src/backend.rs/0
{ "file_path": "candle/candle-core/src/backend.rs", "repo_id": "candle", "token_count": 2111 }
18
/// Helper functions to plug cuda kernels in candle. use crate::{Layout, Result, Shape, WithDType}; pub use cudarc; use cudarc::driver::{CudaSlice, DeviceRepr, ValidAsZeroBits}; use super::{CudaDevice, CudaError, WrapErr}; pub type S = super::CudaStorageSlice; pub trait Map1 { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, ) -> Result<CudaSlice<T>>; fn map(&self, s: &S, d: &CudaDevice, l: &Layout) -> Result<S> { let out = match s { S::U8(s) => S::U8(self.f(s, d, l)?), S::U32(s) => S::U32(self.f(s, d, l)?), S::I64(s) => S::I64(self.f(s, d, l)?), S::BF16(s) => S::BF16(self.f(s, d, l)?), S::F16(s) => S::F16(self.f(s, d, l)?), S::F32(s) => S::F32(self.f(s, d, l)?), S::F64(s) => S::F64(self.f(s, d, l)?), }; Ok(out) } } pub trait Map2 { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, src1: &CudaSlice<T>, layout1: &Layout, src2: &CudaSlice<T>, layout2: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>>; fn map(&self, s1: &S, l1: &Layout, s2: &S, l2: &Layout, d: &CudaDevice) -> Result<S> { let out = match (s1, s2) { (S::U8(s1), S::U8(s2)) => S::U8(self.f(s1, l1, s2, l2, d)?), (S::U32(s1), S::U32(s2)) => S::U32(self.f(s1, l1, s2, l2, d)?), (S::I64(s1), S::I64(s2)) => S::I64(self.f(s1, l1, s2, l2, d)?), (S::BF16(s1), S::BF16(s2)) => S::BF16(self.f(s1, l1, s2, l2, d)?), (S::F16(s1), S::F16(s2)) => S::F16(self.f(s1, l1, s2, l2, d)?), (S::F32(s1), S::F32(s2)) => S::F32(self.f(s1, l1, s2, l2, d)?), (S::F64(s1), S::F64(s2)) => S::F64(self.f(s1, l1, s2, l2, d)?), _ => Err(CudaError::InternalError("dtype mismatch in binary op"))?, }; Ok(out) } } pub trait Map3 { #[allow(clippy::too_many_arguments)] fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, src1: &CudaSlice<T>, layout1: &Layout, src2: &CudaSlice<T>, layout2: &Layout, src3: &CudaSlice<T>, layout3: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>>; #[allow(clippy::too_many_arguments)] fn map( &self, s1: &S, l1: &Layout, s2: &S, l2: &Layout, s3: &S, l3: &Layout, d: &CudaDevice, ) -> Result<S> { let out = match (s1, s2, s3) { (S::U8(s1), S::U8(s2), S::U8(s3)) => S::U8(self.f(s1, l1, s2, l2, s3, l3, d)?), (S::U32(s1), S::U32(s2), S::U32(s3)) => S::U32(self.f(s1, l1, s2, l2, s3, l3, d)?), (S::I64(s1), S::I64(s2), S::I64(s3)) => S::I64(self.f(s1, l1, s2, l2, s3, l3, d)?), (S::BF16(s1), S::BF16(s2), S::BF16(s3)) => S::BF16(self.f(s1, l1, s2, l2, s3, l3, d)?), (S::F16(s1), S::F16(s2), S::F16(s3)) => S::F16(self.f(s1, l1, s2, l2, s3, l3, d)?), (S::F32(s1), S::F32(s2), S::F32(s3)) => S::F32(self.f(s1, l1, s2, l2, s3, l3, d)?), (S::F64(s1), S::F64(s2), S::F64(s3)) => S::F64(self.f(s1, l1, s2, l2, s3, l3, d)?), _ => Err(CudaError::InternalError("dtype mismatch in ternary op"))?, }; Ok(out) } } pub trait Map2InPlace { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, dst: &mut CudaSlice<T>, dst_shape: &Shape, src: &CudaSlice<T>, src_l: &Layout, dev: &CudaDevice, ) -> Result<()>; fn map( &self, dst: &mut S, dst_s: &Shape, src: &S, src_l: &Layout, d: &CudaDevice, ) -> Result<()> { match (dst, src) { (S::U8(dst), S::U8(src)) => self.f(dst, dst_s, src, src_l, d), (S::U32(dst), S::U32(src)) => self.f(dst, dst_s, src, src_l, d), (S::I64(dst), S::I64(src)) => self.f(dst, dst_s, src, src_l, d), (S::BF16(dst), S::BF16(src)) => self.f(dst, dst_s, src, src_l, d), (S::F16(dst), S::F16(src)) => self.f(dst, dst_s, src, src_l, d), (S::F32(dst), S::F32(src)) => self.f(dst, dst_s, src, src_l, d), (S::F64(dst), S::F64(src)) => self.f(dst, dst_s, src, src_l, d), _ => 
Err(CudaError::InternalError("dtype mismatch in binary op"))?, } } } pub trait Map1Any { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits, W: Fn(CudaSlice<T>) -> S>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, wrap: W, ) -> Result<S>; fn map(&self, s: &S, d: &CudaDevice, l: &Layout) -> Result<S> { let out = match s { S::U8(s) => self.f(s, d, l, S::U8)?, S::U32(s) => self.f(s, d, l, S::U32)?, S::I64(s) => self.f(s, d, l, S::I64)?, S::BF16(s) => self.f(s, d, l, S::BF16)?, S::F16(s) => self.f(s, d, l, S::F16)?, S::F32(s) => self.f(s, d, l, S::F32)?, S::F64(s) => self.f(s, d, l, S::F64)?, }; Ok(out) } } pub trait Map2Any { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, src1: &CudaSlice<T>, layout1: &Layout, src2: &CudaSlice<T>, layout2: &Layout, dev: &CudaDevice, ) -> Result<S>; fn map(&self, s1: &S, l1: &Layout, s2: &S, l2: &Layout, d: &CudaDevice) -> Result<S> { let out = match (s1, s2) { (S::U8(s1), S::U8(s2)) => self.f(s1, l1, s2, l2, d)?, (S::U32(s1), S::U32(s2)) => self.f(s1, l1, s2, l2, d)?, (S::I64(s1), S::I64(s2)) => self.f(s1, l1, s2, l2, d)?, (S::BF16(s1), S::BF16(s2)) => self.f(s1, l1, s2, l2, d)?, (S::F16(s1), S::F16(s2)) => self.f(s1, l1, s2, l2, d)?, (S::F32(s1), S::F32(s2)) => self.f(s1, l1, s2, l2, d)?, (S::F64(s1), S::F64(s2)) => self.f(s1, l1, s2, l2, d)?, _ => Err(CudaError::InternalError("dtype mismatch in binary op")).w()?, }; Ok(out) } }
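
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): how a new unary cuda
// kernel could be plugged in through `Map1`. The kernel name "my_unary" and
// the `kernels::UNARY` module pairing are hypothetical placeholders, and the
// launch parameters must match whatever signature the actual .cu kernel has.
// The structure mirrors how `Map1Any` is implemented elsewhere in the crate
// (e.g. for arg-sort).
// ---------------------------------------------------------------------------
#[allow(dead_code)]
struct MyUnaryExample;

impl Map1 for MyUnaryExample {
    fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>(
        &self,
        src: &CudaSlice<T>,
        dev: &CudaDevice,
        layout: &Layout,
    ) -> Result<CudaSlice<T>> {
        use cudarc::driver::{LaunchAsync, LaunchConfig};
        // Only handle contiguous inputs in this sketch.
        let slice = match layout.contiguous_offsets() {
            None => crate::bail!("input has to be contiguous"),
            Some((o1, o2)) => src.slice(o1..o2),
        };
        let el_count = layout.shape().elem_count();
        // Allocate the output buffer; it is filled by the kernel.
        let dst = unsafe { dev.alloc::<T>(el_count) }.w()?;
        // Load the (hypothetical) kernel from a pre-compiled PTX module.
        let func = dev.get_or_load_func(&super::kernel_name::<T>("my_unary"), super::kernels::UNARY)?;
        let cfg = LaunchConfig::for_num_elems(el_count as u32);
        // Parameter order is dictated by the kernel signature (assumed here).
        let params = (el_count, &slice, &dst);
        unsafe { func.launch(cfg, params) }.w()?;
        Ok(dst)
    }
}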
candle/candle-core/src/cuda_backend/utils.rs/0
{ "file_path": "candle/candle-core/src/cuda_backend/utils.rs", "repo_id": "candle", "token_count": 3748 }
19
// Just enough pickle support to be able to read PyTorch checkpoints. // This hardcodes objects that are required for tensor reading, we may want to make this a bit more // composable/tensor agnostic at some point. use crate::{DType, Error as E, Layout, Result, Tensor}; use byteorder::{LittleEndian, ReadBytesExt}; use std::collections::HashMap; use std::io::BufRead; const VERBOSE: bool = false; // https://docs.juliahub.com/Pickle/LAUNc/0.1.0/opcode/ #[repr(u8)] #[derive(Debug, Eq, PartialEq, Clone)] pub enum OpCode { // https://github.com/python/cpython/blob/ed25f097160b5cbb0c9a1f9a746d2f1bbc96515a/Lib/pickletools.py#L2123 Proto = 0x80, Global = b'c', BinPut = b'q', LongBinPut = b'r', EmptyTuple = b')', Reduce = b'R', Mark = b'(', BinUnicode = b'X', BinInt = b'J', Tuple = b't', BinPersId = b'Q', BinInt1 = b'K', BinInt2 = b'M', Tuple1 = 0x85, Tuple2 = 0x86, Tuple3 = 0x87, NewTrue = 0x88, NewFalse = 0x89, None = b'N', BinGet = b'h', LongBinGet = b'j', SetItem = b's', SetItems = b'u', EmptyDict = b'}', Dict = b'd', Build = b'b', Stop = b'.', NewObj = 0x81, EmptyList = b']', BinFloat = b'G', Append = b'a', Appends = b'e', } // Avoid using FromPrimitive so as not to drag another dependency. impl TryFrom<u8> for OpCode { type Error = u8; fn try_from(value: u8) -> std::result::Result<Self, Self::Error> { match value { 0x80 => Ok(Self::Proto), b'c' => Ok(Self::Global), b'q' => Ok(Self::BinPut), b'r' => Ok(Self::LongBinPut), b')' => Ok(Self::EmptyTuple), b'R' => Ok(Self::Reduce), b'(' => Ok(Self::Mark), b'X' => Ok(Self::BinUnicode), b'J' => Ok(Self::BinInt), b't' => Ok(Self::Tuple), b'Q' => Ok(Self::BinPersId), b'K' => Ok(Self::BinInt1), b'M' => Ok(Self::BinInt2), b'N' => Ok(Self::None), 0x85 => Ok(Self::Tuple1), 0x86 => Ok(Self::Tuple2), 0x87 => Ok(Self::Tuple3), 0x88 => Ok(Self::NewTrue), 0x89 => Ok(Self::NewFalse), b'h' => Ok(Self::BinGet), b'j' => Ok(Self::LongBinGet), b's' => Ok(Self::SetItem), b'u' => Ok(Self::SetItems), b'}' => Ok(Self::EmptyDict), b'd' => Ok(Self::EmptyDict), b'b' => Ok(Self::Build), b'.' 
=> Ok(Self::Stop), 0x81 => Ok(Self::NewObj), b']' => Ok(Self::EmptyList), b'G' => Ok(Self::BinFloat), b'a' => Ok(Self::Append), b'e' => Ok(Self::Appends), value => Err(value), } } } fn read_to_newline<R: BufRead>(r: &mut R) -> Result<Vec<u8>> { let mut data: Vec<u8> = Vec::with_capacity(32); r.read_until(b'\n', &mut data)?; data.pop(); if data.last() == Some(&b'\r') { data.pop(); } Ok(data) } #[derive(Debug, Clone, PartialEq)] pub enum Object { Class { module_name: String, class_name: String, }, Int(i32), Float(f64), Unicode(String), Bool(bool), None, Tuple(Vec<Object>), List(Vec<Object>), Mark, Dict(Vec<(Object, Object)>), Reduce { callable: Box<Object>, args: Box<Object>, }, Build { callable: Box<Object>, args: Box<Object>, }, PersistentLoad(Box<Object>), } type OResult<T> = std::result::Result<T, Object>; impl Object { pub fn unicode(self) -> OResult<String> { match self { Self::Unicode(t) => Ok(t), _ => Err(self), } } pub fn reduce(self) -> OResult<(Self, Self)> { match self { Self::Reduce { callable, args } => Ok((*callable, *args)), _ => Err(self), } } pub fn none(self) -> OResult<()> { match self { Self::None => Ok(()), _ => Err(self), } } pub fn persistent_load(self) -> OResult<Self> { match self { Self::PersistentLoad(t) => Ok(*t), _ => Err(self), } } pub fn bool(self) -> OResult<bool> { match self { Self::Bool(t) => Ok(t), _ => Err(self), } } pub fn int(self) -> OResult<i32> { match self { Self::Int(t) => Ok(t), _ => Err(self), } } pub fn tuple(self) -> OResult<Vec<Self>> { match self { Self::Tuple(t) => Ok(t), _ => Err(self), } } pub fn dict(self) -> OResult<Vec<(Self, Self)>> { match self { Self::Dict(t) => Ok(t), _ => Err(self), } } pub fn class(self) -> OResult<(String, String)> { match self { Self::Class { module_name, class_name, } => Ok((module_name, class_name)), _ => Err(self), } } pub fn into_tensor_info( self, name: Self, dir_name: &std::path::Path, ) -> Result<Option<TensorInfo>> { let name = match name.unicode() { Ok(name) => name, Err(_) => return Ok(None), }; let (callable, args) = match self.reduce() { Ok(callable_args) => callable_args, _ => return Ok(None), }; let (callable, args) = match callable { Object::Class { module_name, class_name, } if module_name == "torch._tensor" && class_name == "_rebuild_from_type_v2" => { let mut args = args.tuple()?; let callable = args.remove(0); let args = args.remove(1); (callable, args) } Object::Class { module_name, class_name, } if module_name == "torch._utils" && class_name == "_rebuild_parameter" => { let mut args = args.tuple()?; args.remove(0).reduce()? 
} _ => (callable, args), }; match callable { Object::Class { module_name, class_name, } if module_name == "torch._utils" && class_name == "_rebuild_tensor_v2" => {} _ => return Ok(None), }; let (layout, dtype, file_path, storage_size) = rebuild_args(args)?; Ok(Some(TensorInfo { name, dtype, layout, path: format!("{}/{}", dir_name.to_string_lossy(), file_path), storage_size, })) } } impl TryFrom<Object> for String { type Error = Object; fn try_from(value: Object) -> std::result::Result<Self, Self::Error> { match value { Object::Unicode(s) => Ok(s), other => Err(other), } } } impl TryFrom<Object> for usize { type Error = Object; fn try_from(value: Object) -> std::result::Result<Self, Self::Error> { match value { Object::Int(s) if s >= 0 => Ok(s as usize), other => Err(other), } } } impl<T: TryFrom<Object, Error = Object>> TryFrom<Object> for Vec<T> { type Error = Object; fn try_from(value: Object) -> std::result::Result<Self, Self::Error> { match value { Object::Tuple(values) => { // This does not return the appropriate value in the error case but instead return // the object related to the first error. values .into_iter() .map(|v| T::try_from(v)) .collect::<std::result::Result<Vec<T>, Self::Error>>() } other => Err(other), } } } #[derive(Debug)] pub struct Stack { stack: Vec<Object>, memo: HashMap<u32, Object>, } impl Stack { pub fn empty() -> Self { Self { stack: Vec::with_capacity(512), memo: HashMap::new(), } } pub fn stack(&self) -> &[Object] { self.stack.as_slice() } pub fn read_loop<R: BufRead>(&mut self, r: &mut R) -> Result<()> { loop { if self.read(r)? { break; } } Ok(()) } pub fn finalize(mut self) -> Result<Object> { self.pop() } fn push(&mut self, obj: Object) { self.stack.push(obj) } fn pop(&mut self) -> Result<Object> { match self.stack.pop() { None => crate::bail!("unexpected empty stack"), Some(obj) => Ok(obj), } } // https://docs.juliahub.com/Pickle/LAUNc/0.1.0/opcode/#Pickle.OpCodes.BUILD fn build(&mut self) -> Result<()> { let args = self.pop()?; let obj = self.pop()?; let obj = match (obj, args) { (Object::Dict(mut obj), Object::Dict(mut args)) => { obj.append(&mut args); Object::Dict(obj) } (obj, args) => Object::Build { callable: Box::new(obj), args: Box::new(args), }, }; self.push(obj); Ok(()) } fn reduce(&mut self) -> Result<()> { let args = self.pop()?; let callable = self.pop()?; #[allow(clippy::single_match)] let reduced = match &callable { Object::Class { module_name, class_name, } => { if module_name == "collections" && (class_name == "OrderedDict" || class_name == "defaultdict") { // TODO: have a separate ordered dict and a separate default dict. Some(Object::Dict(vec![])) } else { None } } _ => None, }; let reduced = reduced.unwrap_or_else(|| Object::Reduce { callable: Box::new(callable), args: Box::new(args), }); self.push(reduced); Ok(()) } fn last(&mut self) -> Result<&mut Object> { match self.stack.last_mut() { None => crate::bail!("unexpected empty stack"), Some(obj) => Ok(obj), } } fn memo_get(&self, id: u32) -> Result<Object> { match self.memo.get(&id) { None => crate::bail!("missing object in memo {id}"), Some(obj) => { // Maybe we should use refcounting rather than doing potential large clones here. 
Ok(obj.clone()) } } } fn memo_put(&mut self, id: u32) -> Result<()> { let obj = self.last()?.clone(); self.memo.insert(id, obj); Ok(()) } fn persistent_load(&self, id: Object) -> Result<Object> { Ok(Object::PersistentLoad(Box::new(id))) } fn new_obj(&self, class: Object, args: Object) -> Result<Object> { Ok(Object::Reduce { callable: Box::new(class), args: Box::new(args), }) } fn pop_to_marker(&mut self) -> Result<Vec<Object>> { let mut mark_idx = None; for (idx, obj) in self.stack.iter().enumerate().rev() { if obj == &Object::Mark { mark_idx = Some(idx); break; } } match mark_idx { Some(mark_idx) => { let objs = self.stack.split_off(mark_idx + 1); self.stack.pop(); Ok(objs) } None => { crate::bail!("marker object not found") } } } pub fn read<R: BufRead>(&mut self, r: &mut R) -> Result<bool> { let op_code = match OpCode::try_from(r.read_u8()?) { Ok(op_code) => op_code, Err(op_code) => { crate::bail!("unknown op-code {op_code}") } }; // println!("op: {op_code:?}"); // println!("{:?}", self.stack); match op_code { OpCode::Proto => { let version = r.read_u8()?; if VERBOSE { println!("proto {version}"); } } OpCode::Global => { let module_name = read_to_newline(r)?; let class_name = read_to_newline(r)?; let module_name = String::from_utf8_lossy(&module_name).to_string(); let class_name = String::from_utf8_lossy(&class_name).to_string(); self.push(Object::Class { module_name, class_name, }) } OpCode::BinInt1 => { let arg = r.read_u8()?; self.push(Object::Int(arg as i32)) } OpCode::BinInt2 => { let arg = r.read_u16::<LittleEndian>()?; self.push(Object::Int(arg as i32)) } OpCode::BinInt => { let arg = r.read_i32::<LittleEndian>()?; self.push(Object::Int(arg)) } OpCode::BinFloat => { // Somehow floats are encoded using BigEndian whereas int types use LittleEndian. 
// https://github.com/python/cpython/blob/0c80da4c14d904a367968955544dd6ae58c8101c/Lib/pickletools.py#L855 // https://github.com/pytorch/pytorch/blob/372d078f361e726bb4ac0884ac334b04c58179ef/torch/_weights_only_unpickler.py#L243 let arg = r.read_f64::<byteorder::BigEndian>()?; self.push(Object::Float(arg)) } OpCode::BinUnicode => { let len = r.read_u32::<LittleEndian>()?; let mut data = vec![0u8; len as usize]; r.read_exact(&mut data)?; let data = String::from_utf8(data).map_err(E::wrap)?; self.push(Object::Unicode(data)) } OpCode::BinPersId => { let id = self.pop()?; let obj = self.persistent_load(id)?; self.push(obj) } OpCode::Tuple => { let objs = self.pop_to_marker()?; self.push(Object::Tuple(objs)) } OpCode::Tuple1 => { let obj = self.pop()?; self.push(Object::Tuple(vec![obj])) } OpCode::Tuple2 => { let obj2 = self.pop()?; let obj1 = self.pop()?; self.push(Object::Tuple(vec![obj1, obj2])) } OpCode::Tuple3 => { let obj3 = self.pop()?; let obj2 = self.pop()?; let obj1 = self.pop()?; self.push(Object::Tuple(vec![obj1, obj2, obj3])) } OpCode::NewTrue => self.push(Object::Bool(true)), OpCode::NewFalse => self.push(Object::Bool(false)), OpCode::Append => { let value = self.pop()?; let pylist = self.last()?; if let Object::List(d) = pylist { d.push(value) } else { crate::bail!("expected a list, got {pylist:?}") } } OpCode::Appends => { let objs = self.pop_to_marker()?; let pylist = self.last()?; if let Object::List(d) = pylist { d.extend(objs) } else { crate::bail!("expected a list, got {pylist:?}") } } OpCode::SetItem => { let value = self.pop()?; let key = self.pop()?; let pydict = self.last()?; if let Object::Dict(d) = pydict { d.push((key, value)) } else { crate::bail!("expected a dict, got {pydict:?}") } } OpCode::SetItems => { let mut objs = self.pop_to_marker()?; let pydict = self.last()?; if let Object::Dict(d) = pydict { if objs.len() % 2 != 0 { crate::bail!("setitems: not an even number of objects") } while let Some(value) = objs.pop() { let key = objs.pop().unwrap(); d.push((key, value)) } } else { crate::bail!("expected a dict, got {pydict:?}") } } OpCode::None => self.push(Object::None), OpCode::Stop => { return Ok(true); } OpCode::Build => self.build()?, OpCode::EmptyDict => self.push(Object::Dict(vec![])), OpCode::Dict => { let mut objs = self.pop_to_marker()?; let mut pydict = vec![]; if objs.len() % 2 != 0 { crate::bail!("setitems: not an even number of objects") } while let Some(value) = objs.pop() { let key = objs.pop().unwrap(); pydict.push((key, value)) } self.push(Object::Dict(pydict)) } OpCode::Mark => self.push(Object::Mark), OpCode::Reduce => self.reduce()?, OpCode::EmptyTuple => self.push(Object::Tuple(vec![])), OpCode::EmptyList => self.push(Object::List(vec![])), OpCode::BinGet => { let arg = r.read_u8()?; let obj = self.memo_get(arg as u32)?; self.push(obj) } OpCode::LongBinGet => { let arg = r.read_u32::<LittleEndian>()?; let obj = self.memo_get(arg)?; self.push(obj) } OpCode::BinPut => { let arg = r.read_u8()?; self.memo_put(arg as u32)? } OpCode::LongBinPut => { let arg = r.read_u32::<LittleEndian>()?; self.memo_put(arg)? 
} OpCode::NewObj => { let args = self.pop()?; let class = self.pop()?; let obj = self.new_obj(class, args)?; self.push(obj) } } Ok(false) } } impl From<Object> for E { fn from(value: Object) -> Self { E::Msg(format!("conversion error on {value:?}")) } } // https://github.com/pytorch/pytorch/blob/4eac43d046ded0f0a5a5fa8db03eb40f45bf656e/torch/_utils.py#L198 // Arguments: storage, storage_offset, size, stride, requires_grad, backward_hooks fn rebuild_args(args: Object) -> Result<(Layout, DType, String, usize)> { let mut args = args.tuple()?; let stride = Vec::<usize>::try_from(args.remove(3))?; let size = Vec::<usize>::try_from(args.remove(2))?; let offset = args.remove(1).int()? as usize; let storage = args.remove(0).persistent_load()?; let mut storage = storage.tuple()?; let storage_size = storage.remove(4).int()? as usize; let path = storage.remove(2).unicode()?; let (_module_name, class_name) = storage.remove(1).class()?; let dtype = match class_name.as_str() { "FloatStorage" => DType::F32, "DoubleStorage" => DType::F64, "HalfStorage" => DType::F16, "BFloat16Storage" => DType::BF16, "ByteStorage" => DType::U8, "LongStorage" => DType::I64, other => { crate::bail!("unsupported storage type {other}") } }; let layout = Layout::new(crate::Shape::from(size), stride, offset); Ok((layout, dtype, path, storage_size)) } #[derive(Debug, Clone)] pub struct TensorInfo { pub name: String, pub dtype: DType, pub layout: Layout, pub path: String, pub storage_size: usize, } /// Read the tensor info from a .pth file. /// /// # Arguments /// * `file` - The path to the .pth file. /// * `verbose` - Whether to print debug information. /// * `key` - Optional key to retrieve `state_dict` from the pth file. pub fn read_pth_tensor_info<P: AsRef<std::path::Path>>( file: P, verbose: bool, key: Option<&str>, ) -> Result<Vec<TensorInfo>> { let file = std::fs::File::open(file)?; let zip_reader = std::io::BufReader::new(file); let mut zip = zip::ZipArchive::new(zip_reader)?; let zip_file_names = zip .file_names() .map(|f| f.to_string()) .collect::<Vec<String>>(); let mut tensor_infos = vec![]; for file_name in zip_file_names.iter() { if !file_name.ends_with("data.pkl") { continue; } let dir_name = std::path::PathBuf::from(file_name.strip_suffix(".pkl").unwrap()); let reader = zip.by_name(file_name)?; let mut reader = std::io::BufReader::new(reader); let mut stack = Stack::empty(); stack.read_loop(&mut reader)?; let obj = stack.finalize()?; if VERBOSE || verbose { println!("{obj:#?}"); } let obj = match obj { Object::Build { callable, args } => match *callable { Object::Reduce { callable, args: _ } => match *callable { Object::Class { module_name, class_name, } if module_name == "__torch__" && class_name == "Module" => *args, _ => continue, }, _ => continue, }, obj => obj, }; // If key is provided, then we need to extract the state_dict from the object. let obj = if let Some(key) = key { if let Object::Dict(key_values) = obj { key_values .into_iter() .find(|(k, _)| *k == Object::Unicode(key.to_owned())) .map(|(_, v)| v) .ok_or_else(|| E::Msg(format!("key {key} not found")))? } else { obj } } else { obj }; // If the object is a dict, then we can extract the tensor info from it. // NOTE: We are assuming that the `obj` is state_dict by this stage. 
if let Object::Dict(key_values) = obj { for (name, value) in key_values.into_iter() { match value.into_tensor_info(name, &dir_name) { Ok(Some(tensor_info)) => tensor_infos.push(tensor_info), Ok(None) => {} Err(err) => eprintln!("skipping: {err:?}"), } } } } Ok(tensor_infos) } /// Lazy tensor loader. pub struct PthTensors { tensor_infos: HashMap<String, TensorInfo>, path: std::path::PathBuf, // We do not store a zip reader as it needs mutable access to extract data. Instead we // re-create a zip reader for each tensor. } impl PthTensors { pub fn new<P: AsRef<std::path::Path>>(path: P, key: Option<&str>) -> Result<Self> { let tensor_infos = read_pth_tensor_info(path.as_ref(), false, key)?; let tensor_infos = tensor_infos .into_iter() .map(|ti| (ti.name.to_string(), ti)) .collect(); let path = path.as_ref().to_owned(); Ok(Self { tensor_infos, path }) } pub fn tensor_infos(&self) -> &HashMap<String, TensorInfo> { &self.tensor_infos } pub fn get(&self, name: &str) -> Result<Option<Tensor>> { use std::io::Read; let tensor_info = match self.tensor_infos.get(name) { None => return Ok(None), Some(tensor_info) => tensor_info, }; // We hope that the file has not changed since first reading it. let zip_reader = std::io::BufReader::new(std::fs::File::open(&self.path)?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut reader = zip.by_name(&tensor_info.path)?; let is_fortran_contiguous = tensor_info.layout.is_fortran_contiguous(); let rank = tensor_info.layout.shape().rank(); // Reading the data is a bit tricky as it can be strided, for now only support the basic // case and when the tensor is fortran contiguous. if !tensor_info.layout.is_contiguous() && !is_fortran_contiguous { crate::bail!( "cannot retrieve non-contiguous tensors {:?}", tensor_info.layout ) } let start_offset = tensor_info.layout.start_offset(); if start_offset > 0 { std::io::copy( &mut reader.by_ref().take(start_offset as u64), &mut std::io::sink(), )?; } let tensor = Tensor::from_reader( tensor_info.layout.shape().clone(), tensor_info.dtype, &mut reader, )?; if rank > 1 && is_fortran_contiguous { // Reverse the shape, e.g. Shape(2, 3, 4) -> Shape(4, 3, 2) let shape_reversed: Vec<_> = tensor_info.layout.dims().iter().rev().cloned().collect(); let tensor = tensor.reshape(shape_reversed)?; // Permute (transpose) the dimensions, e.g. Shape(4, 3, 2) -> Shape(2, 3, 4) let dim_indeces_reversed: Vec<_> = (0..rank).rev().collect(); let tensor = tensor.permute(dim_indeces_reversed)?; Ok(Some(tensor)) } else { Ok(Some(tensor)) } } } /// Read all the tensors from a PyTorch pth file with a given key. /// /// # Arguments /// * `path` - Path to the pth file. /// * `key` - Optional key to retrieve `state_dict` from the pth file. Sometimes the pth file /// contains multiple objects and the state_dict is the one we are interested in. pub fn read_all_with_key<P: AsRef<std::path::Path>>( path: P, key: Option<&str>, ) -> Result<Vec<(String, Tensor)>> { let pth = PthTensors::new(path, key)?; let tensor_names = pth.tensor_infos.keys(); let mut tensors = Vec::with_capacity(tensor_names.len()); for name in tensor_names { if let Some(tensor) = pth.get(name)? { tensors.push((name.to_string(), tensor)) } } Ok(tensors) } /// Read all the tensors from a PyTorch pth file. /// /// # Arguments /// * `path` - Path to the pth file. pub fn read_all<P: AsRef<std::path::Path>>(path: P) -> Result<Vec<(String, Tensor)>> { read_all_with_key(path, None) }
candle/candle-core/src/pickle.rs/0
{ "file_path": "candle/candle-core/src/pickle.rs", "repo_id": "candle", "token_count": 14306 }
20
use crate::{Result, Tensor}; use rayon::prelude::*; #[derive(Debug, Clone, Copy)] struct ArgSort { asc: bool, last_dim: usize, } impl ArgSort { fn asort<T: crate::WithDType>(&self, vs: &[T], layout: &crate::Layout) -> Vec<u32> { #[allow(clippy::uninit_vec)] // Safety: indexes are set later in the parallelized section. let mut sort_indexes = unsafe { let el_count = layout.shape().elem_count(); let mut v = Vec::with_capacity(el_count); v.set_len(el_count); v }; if self.asc { sort_indexes .par_chunks_exact_mut(self.last_dim) .zip(vs.par_chunks_exact(self.last_dim)) .for_each(|(indexes, vs)| { indexes .iter_mut() .enumerate() .for_each(|(i, v)| *v = i as u32); indexes.sort_by(|&i, &j| { vs[i as usize] .partial_cmp(&vs[j as usize]) .unwrap_or(std::cmp::Ordering::Greater) }) }); } else { sort_indexes .par_chunks_exact_mut(self.last_dim) .zip(vs.par_chunks_exact(self.last_dim)) .for_each(|(indexes, vs)| { indexes .iter_mut() .enumerate() .for_each(|(i, v)| *v = i as u32); indexes.sort_by(|&j, &i| { vs[i as usize] .partial_cmp(&vs[j as usize]) .unwrap_or(std::cmp::Ordering::Greater) }) }); } sort_indexes } } impl crate::CustomOp1 for ArgSort { fn name(&self) -> &'static str { "argsort" } fn cpu_fwd( &self, storage: &crate::CpuStorage, layout: &crate::Layout, ) -> Result<(crate::CpuStorage, crate::Shape)> { let sort_indexes = match storage { crate::CpuStorage::U8(vs) => self.asort(vs, layout), crate::CpuStorage::U32(vs) => self.asort(vs, layout), crate::CpuStorage::I64(vs) => self.asort(vs, layout), crate::CpuStorage::BF16(vs) => self.asort(vs, layout), crate::CpuStorage::F16(vs) => self.asort(vs, layout), crate::CpuStorage::F32(vs) => self.asort(vs, layout), crate::CpuStorage::F64(vs) => self.asort(vs, layout), }; let sort_indexes = crate::CpuStorage::U32(sort_indexes); Ok((sort_indexes, layout.shape().into())) } #[cfg(feature = "cuda")] fn cuda_fwd( &self, storage: &crate::CudaStorage, layout: &crate::Layout, ) -> Result<(crate::CudaStorage, crate::Shape)> { use crate::cuda_backend::cudarc::driver::{ CudaSlice, DeviceRepr, LaunchAsync, LaunchConfig, ValidAsZeroBits, }; use crate::cuda_backend::{kernel_name, kernels, CudaStorageSlice as S, Map1Any, WrapErr}; use crate::{CudaDevice, WithDType}; impl Map1Any for ArgSort { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits, W: Fn(CudaSlice<T>) -> S>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &crate::Layout, _wrap: W, ) -> Result<S> { let slice = match layout.contiguous_offsets() { None => crate::bail!("input has to be contiguous"), Some((o1, o2)) => src.slice(o1..o2), }; let elem_count = layout.shape().elem_count(); let dst = unsafe { dev.alloc::<u32>(elem_count) }.w()?; let func = if self.asc { dev.get_or_load_func(&kernel_name::<T>("asort_asc"), kernels::SORT)? } else { dev.get_or_load_func(&kernel_name::<T>("asort_desc"), kernels::SORT)? 
};
                let ncols = self.last_dim;
                let nrows = elem_count / ncols;
                let ncols_pad = next_power_of_2(ncols);
                let params = (&slice, &dst, ncols as i32, ncols_pad as i32);
                let cfg = LaunchConfig {
                    grid_dim: (1, nrows as u32, 1),
                    block_dim: (ncols_pad as u32, 1, 1),
                    shared_mem_bytes: (ncols_pad * std::mem::size_of::<u32>()) as u32,
                };
                unsafe { func.launch(cfg, params) }.w()?;
                Ok(S::U32(dst))
            }
        }

        use crate::backend::BackendStorage;
        let dev = storage.device();
        let slice = self.map(&storage.slice, dev, layout)?;
        let dst = crate::cuda_backend::CudaStorage {
            slice,
            device: dev.clone(),
        };
        Ok((dst, layout.shape().clone()))
    }

    #[cfg(feature = "metal")]
    fn metal_fwd(
        &self,
        storage: &crate::MetalStorage,
        layout: &crate::Layout,
    ) -> Result<(crate::MetalStorage, crate::Shape)> {
        use crate::backend::BackendStorage;
        use crate::DType;

        let name = {
            if self.asc {
                match storage.dtype() {
                    DType::BF16 => "asort_asc_bf16",
                    DType::F16 => "asort_asc_f16",
                    DType::F32 => "asort_asc_f32",
                    DType::F64 => "asort_asc_f64",
                    DType::U8 => "asort_asc_u8",
                    DType::U32 => "asort_asc_u32",
                    DType::I64 => "asort_asc_i64",
                }
            } else {
                match storage.dtype() {
                    DType::BF16 => "asort_desc_bf16",
                    DType::F16 => "asort_desc_f16",
                    DType::F32 => "asort_desc_f32",
                    DType::F64 => "asort_desc_f64",
                    DType::U8 => "asort_desc_u8",
                    DType::U32 => "asort_desc_u32",
                    DType::I64 => "asort_desc_i64",
                }
            }
        };
        let device = storage.device();
        let kernels = device.kernels();
        let command_buffer = device.command_buffer()?;
        let el = layout.shape().elem_count();
        let ncols = self.last_dim;
        let nrows = el / ncols;
        let src = crate::metal_backend::buffer_o(storage.buffer(), layout, storage.dtype());
        let dst = device.new_buffer(el, DType::U32, "asort")?;
        let mut ncols_pad = 1;
        while ncols_pad < ncols {
            ncols_pad *= 2;
        }
        candle_metal_kernels::call_arg_sort(
            device.metal_device(),
            &command_buffer,
            kernels,
            name,
            nrows,
            ncols,
            ncols_pad,
            src,
            &dst,
        )
        .map_err(crate::Error::wrap)?;
        let dst = crate::MetalStorage::new(dst, device.clone(), el, DType::U32);
        Ok((dst, layout.shape().clone()))
    }
}

#[allow(unused)]
fn next_power_of_2(x: usize) -> usize {
    let mut n = 1;
    while n < x {
        n *= 2
    }
    n
}

impl Tensor {
    /// Returns the indices that sort the tensor along the last dimension.
    ///
    /// If `asc` is `true`, sorting is in ascending order. Otherwise sorting is performed in
    /// descending order. The sort is unstable so there are no guarantees on the final order
    /// when it comes to ties.
    pub fn arg_sort_last_dim(&self, asc: bool) -> Result<Tensor> {
        if !self.is_contiguous() {
            return Err(crate::Error::RequiresContiguous {
                op: "arg_sort_last_dim",
            });
        }
        let last_dim = match self.dims().last() {
            None => crate::bail!("empty last-dim in arg-sort"),
            Some(last_dim) => *last_dim,
        };
        // No need for a backward pass for arg sort.
        self.apply_op1_no_bwd(&ArgSort { asc, last_dim })
    }

    /// Sorts the tensor along the last dimension, returns the sorted tensor together with the
    /// sorted indexes.
    ///
    /// If `asc` is `true`, sorting is in ascending order. Otherwise sorting is performed in
    /// descending order. The sort is unstable so there are no guarantees on the final order
    /// when it comes to ties.
    pub fn sort_last_dim(&self, asc: bool) -> Result<(Tensor, Tensor)> {
        if !self.is_contiguous() {
            return Err(crate::Error::RequiresContiguous {
                op: "sort_last_dim",
            });
        }
        let asort = self.arg_sort_last_dim(asc)?;
        let sorted = self.gather(&asort, crate::D::Minus1)?;
        Ok((sorted, asort))
    }
}
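// A minimal usage sketch (assumption: CPU device) showing how the custom op above
// is reached through the public API:
//
// fn sort_example() -> Result<()> {
//     let t = Tensor::new(&[3f32, 1., 4., 1., 5.], &crate::Device::Cpu)?;
//     let (sorted, indexes) = t.sort_last_dim(true)?;
//     // sorted  == [1., 1., 3., 4., 5.]
//     // indexes == [1, 3, 0, 2, 4] (the two ties may swap, the sort is unstable)
//     let _ = (sorted, indexes);
//     Ok(())
// }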
candle/candle-core/src/sort.rs/0
{ "file_path": "candle/candle-core/src/sort.rs", "repo_id": "candle", "token_count": 4817 }
21
use candle_core::{test_device, DType, Device, IndexOp, Result, Tensor}; fn matmul(device: &Device) -> Result<()> { let data = vec![1.0f32, 2.0, 3.0, 4.0]; let a = Tensor::from_slice(&data, (2, 2), device)?; let data = vec![1.0f32, 2.0, 3.0, 4.0]; let b = Tensor::from_slice(&data, (2, 2), device)?; let c = a.matmul(&b)?; assert_eq!(c.to_vec2::<f32>()?, &[[7.0f32, 10.0], [15.0, 22.0]]); let data = vec![1.0f32, 2.0]; let a = Tensor::from_slice(&data, (2, 1), device)?; let data = vec![3.0f32, 4.0]; let b = Tensor::from_slice(&data, (1, 2), device)?; let c = a.matmul(&b)?; assert_eq!(c.to_vec2::<f32>()?, &[&[3.0, 4.0], &[6.0, 8.0]]); let data: Vec<_> = (0..6).map(|i| i as f32).collect(); let a = Tensor::from_slice(&data, (2, 3), device)?; let data: Vec<_> = (0..6).map(|i| (i + 2) as f32).collect(); let b = Tensor::from_slice(&data, (3, 2), device)?; let c = a.matmul(&b)?; assert_eq!(c.to_vec2::<f32>()?, &[&[16., 19.], &[52., 64.]]); let data: Vec<_> = (0..12).map(|i| i as f32).collect(); let a = Tensor::from_slice(&data, (2, 2, 3), device)?; let data: Vec<_> = (0..12).map(|i| (i + 2) as f32).collect(); let b = Tensor::from_slice(&data, (2, 3, 2), device)?; let expected = [[[16., 19.], [52., 64.]], [[214., 235.], [304., 334.]]]; let c = a.matmul(&b)?; assert_eq!(c.to_vec3::<f32>()?, &expected); // Also perform the matmul on contiguous transposed versions. let a_tt = a.t()?.contiguous()?.t()?; assert!(!a_tt.is_contiguous()); assert_eq!(a.dims(), a_tt.dims()); assert_eq!(a_tt.stride(), &[6, 1, 2]); let b_tt = b.t()?.contiguous()?.t()?; assert!(!b_tt.is_contiguous()); assert_eq!(b.dims(), b_tt.dims()); assert_eq!(b_tt.stride(), &[6, 1, 3]); assert_eq!(a_tt.matmul(&b)?.to_vec3::<f32>()?, &expected); assert_eq!(a.matmul(&b_tt)?.to_vec3::<f32>()?, &expected); assert_eq!(a_tt.matmul(&b_tt)?.to_vec3::<f32>()?, &expected); Ok(()) } fn matmul_bf16(device: &Device) -> Result<()> { if !device.supports_bf16() { return Ok(()); } let data = vec![1.0f32, 2.0, 3.0, 4.0]; let a = Tensor::from_slice(&data, (2, 2), device)?.to_dtype(DType::BF16)?; let data = vec![1.0f32, 2.0, 3.0, 4.0]; let b = Tensor::from_slice(&data, (2, 2), device)?.to_dtype(DType::BF16)?; let c = a.matmul(&b)?.to_dtype(DType::F32)?; assert_eq!(c.to_vec2::<f32>()?, &[[7.0f32, 10.0], [15.0, 22.0]]); Ok(()) } fn broadcast_matmul(device: &Device) -> Result<()> { let lhs = Tensor::randn(0f32, 1f32, (3, 1, 4, 5), device)?; let rhs = Tensor::randn(0f32, 1f32, (6, 5, 2), device)?; let out = lhs.broadcast_matmul(&rhs)?; assert_eq!(out.dims(), &[3, 6, 4, 2]); for idx1 in 0..3 { for idx2 in 0..6 { let out = out.i((idx1, idx2))?; let lhs = lhs.i((idx1, 0))?; let rhs = rhs.i(idx2)?; let out2 = lhs.matmul(&rhs); let sum_diff2 = (out - out2)?.sqr()?.sum_all()?; // With cuda, we see errors of up to ~1e-12. assert!(sum_diff2.to_vec0::<f32>()? 
< 1e-6)
        }
    }
    Ok(())
}

// https://github.com/huggingface/candle/issues/1948
fn squeeze_mm(device: &Device) -> Result<()> {
    let seq_len = 8_usize;
    let a = Tensor::zeros((1, seq_len, 16), DType::F32, device)?;
    let x = a.i((.., seq_len - 1, ..))?;
    let w = Tensor::zeros((32, 16), DType::F32, device)?.t()?;
    let x = x.matmul(&w)?;
    assert_eq!(x.dims(), &[1, 32]);
    Ok(())
}

// https://github.com/huggingface/candle/issues/1992
fn mm_layout(device: &Device) -> Result<()> {
    let a = Tensor::arange(0f32, 16f32, device)?.reshape((1, 1, 4, 4))?;
    let b = Tensor::arange(0f32, 8f32, device)?.reshape((1, 1, 4, 2))?;
    let mm1 = a.matmul(&b)?;
    // Force the layout to be:
    // shape: [1, 1, 4, 2], stride: [8, 2, 2, 1], start_offset: 0
    // This is still a contiguous matrix, but the matmul contiguity checks only look at the
    // last two dimensions (the ones with non-1 sizes), so matmul may be reluctant to handle
    // this layout.
    let b = b.transpose(1, 2)?.force_contiguous()?.transpose(1, 2)?;
    let mm2 = a.matmul(&b)?;
    let diff = (mm1 - mm2)?.abs()?.sum_all()?.to_vec0::<f32>()?;
    assert_eq!(diff, 0.);
    Ok(())
}

test_device!(matmul, matmul_cpu, matmul_gpu, matmul_metal);
test_device!(
    matmul_bf16,
    matmul_bf16_cpu,
    matmul_bf16_gpu,
    matmul_bf16_metal
);
test_device!(
    broadcast_matmul,
    broadcast_matmul_cpu,
    broadcast_matmul_gpu,
    broadcast_matmul_metal
);
test_device!(squeeze_mm, squeeze_mm_cpu, squeeze_mm_gpu, squeeze_mm_metal);
test_device!(mm_layout, mm_layout_cpu, mm_layout_gpu, mm_layout_metal);
candle/candle-core/tests/matmul_tests.rs/0
{ "file_path": "candle/candle-core/tests/matmul_tests.rs", "repo_id": "candle", "token_count": 2363 }
22
//! Datasets & Dataloaders for Candle pub mod batcher; pub mod hub; pub mod nlp; pub mod vision; pub use batcher::Batcher;
candle/candle-datasets/src/lib.rs/0
{ "file_path": "candle/candle-datasets/src/lib.rs", "repo_id": "candle", "token_count": 45 }
23
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::Parser; use candle_transformers::models::bigcode::{Config, GPTBigCode}; use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; struct TextGeneration { model: GPTBigCode, device: Device, tokenizer: Tokenizer, logits_processor: LogitsProcessor, } impl TextGeneration { fn new( model: GPTBigCode, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, temp, top_p); Self { model, tokenizer, logits_processor, device: device.clone(), } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { use std::io::Write; println!("starting the inference loop"); print!("{prompt}"); std::io::stdout().flush()?; let mut tokens = self .tokenizer .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); let mut new_tokens = vec![]; let start_gen = std::time::Instant::now(); for index in 0..sample_len { let (context_size, past_len) = if self.model.config().use_cache && index > 0 { (1, tokens.len().saturating_sub(1)) } else { (tokens.len(), 0) }; let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?; let logits = self.model.forward(&input, past_len)?; let logits = logits.squeeze(0)?.to_dtype(DType::F32)?; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); new_tokens.push(next_token); let token = self.tokenizer.decode(&[next_token], true).map_err(E::msg)?; print!("{token}"); std::io::stdout().flush()?; } let dt = start_gen.elapsed(); println!( "{sample_len} tokens generated ({:.3} token/s)", sample_len as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, #[arg(long)] prompt: String, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, default_value_t = 100)] sample_len: usize, #[arg(long, default_value = "bigcode/starcoderbase-1b")] model_id: String, #[arg(long, default_value = "main")] revision: String, #[arg(long)] weight_file: Option<String>, } fn main() -> Result<()> { let args = Args::parse(); let start = std::time::Instant::now(); let api = Api::new()?; let repo = api.repo(Repo::with_revision( args.model_id, RepoType::Model, args.revision, )); let tokenizer_filename = repo.get("tokenizer.json")?; let filenames = match args.weight_file { Some(weight_file) => vec![std::path::PathBuf::from(weight_file)], None => ["model.safetensors"] .iter() .map(|f| repo.get(f)) .collect::<std::result::Result<Vec<_>, _>>()?, }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let start = std::time::Instant::now(); let device = candle_examples::device(args.cpu)?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, DType::F32, &device)? 
}; let config = Config::starcoder_1b(); let model = GPTBigCode::load(vb, config)?; println!("loaded the model in {:?}", start.elapsed()); let mut pipeline = TextGeneration::new( model, tokenizer, args.seed, args.temperature, args.top_p, &device, ); pipeline.run(&args.prompt, args.sample_len)?; Ok(()) }
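// An example invocation (the prompt below is only an illustration):
//
//   cargo run --example bigcode --release -- --prompt "fn fact(n: u64) -> u64 {"
//
// Generation length and sampling behavior can be tweaked through --sample-len,
// --temperature and --top-p, matching the clap arguments defined above.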
candle/candle-examples/examples/bigcode/main.rs/0
{ "file_path": "candle/candle-examples/examples/bigcode/main.rs", "repo_id": "candle", "token_count": 2134 }
24
use enterpolation::linear::ConstEquidistantLinear; use enterpolation::Generator; use palette::LinSrgb; use candle::Tensor; pub struct SpectralRColormap { gradient: ConstEquidistantLinear<f32, LinSrgb, 9>, } impl SpectralRColormap { pub(crate) fn new() -> Self { // Define a colormap similar to 'Spectral_r' by specifying key colors. // got the colors from ChatGPT-4o let gradient = ConstEquidistantLinear::<f32, _, 9>::equidistant_unchecked([ LinSrgb::new(0.3686, 0.3098, 0.6353), // Dark blue LinSrgb::new(0.1961, 0.5333, 0.7412), // Blue LinSrgb::new(0.4000, 0.7608, 0.6471), // Cyan LinSrgb::new(0.6706, 0.8667, 0.6431), // Green LinSrgb::new(0.9020, 0.9608, 0.5961), // Yellow LinSrgb::new(0.9961, 0.8784, 0.5451), // Orange LinSrgb::new(0.9922, 0.6824, 0.3804), // Red LinSrgb::new(0.9569, 0.4275, 0.2627), // Dark red LinSrgb::new(0.8353, 0.2431, 0.3098), // Dark purple ]); Self { gradient } } fn get_color(&self, value: f32) -> LinSrgb { self.gradient.gen(value) } pub fn gray2color(&self, gray: &Tensor) -> candle::Result<Tensor> { println!("Gray: {:?}", gray.dims()); let gray_values: Vec<f32> = gray.flatten_all()?.to_vec1()?; let rgb_values: Vec<f32> = gray_values .iter() .map(|g| self.get_color(*g)) .flat_map(|rgb| [rgb.red, rgb.green, rgb.blue]) .collect(); let [.., height, width] = gray.dims() else { candle::bail!("Not enough dims!") }; let color = Tensor::from_vec(rgb_values, (*height, *width, 3), gray.device())?; color.permute((2, 0, 1)) } }
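// A minimal usage sketch (assumption: `depth` holds values already normalized to
// [0, 1], e.g. a (1, H, W) depth map); the result is a (3, H, W) RGB tensor:
//
// fn colorize_depth(depth: &Tensor) -> candle::Result<Tensor> {
//     let cmap = SpectralRColormap::new();
//     cmap.gray2color(depth)
// }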
candle/candle-examples/examples/depth_anything_v2/color_map.rs/0
{ "file_path": "candle/candle-examples/examples/depth_anything_v2/color_map.rs", "repo_id": "candle", "token_count": 896 }
25
//! EVA-02: Explore the limits of Visual representation at scAle //! https://github.com/baaivision/EVA #[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use clap::Parser; use candle::{DType, Device, IndexOp, Result, Tensor, D}; use candle_nn::{Module, VarBuilder}; use candle_transformers::models::eva2; /// Loads an image from disk using the image crate, this returns a tensor with shape /// (3, 448, 448). OpenAI normalization is applied. pub fn load_image448_openai_norm<P: AsRef<std::path::Path>>(p: P) -> Result<Tensor> { let img = image::ImageReader::open(p)? .decode() .map_err(candle::Error::wrap)? .resize_to_fill(448, 448, image::imageops::FilterType::Triangle); let img = img.to_rgb8(); let data = img.into_raw(); let data = Tensor::from_vec(data, (448, 448, 3), &Device::Cpu)?.permute((2, 0, 1))?; let mean = Tensor::new(&[0.48145466f32, 0.4578275, 0.40821073], &Device::Cpu)?.reshape((3, 1, 1))?; let std = Tensor::new(&[0.26862954f32, 0.261_302_6, 0.275_777_1], &Device::Cpu)? .reshape((3, 1, 1))?; (data.to_dtype(candle::DType::F32)? / 255.)? .broadcast_sub(&mean)? .broadcast_div(&std) } #[derive(Parser)] struct Args { #[arg(long)] model: Option<String>, #[arg(long)] image: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let image = load_image448_openai_norm(args.image)?.to_device(&device)?; println!("loaded image {image:?}"); let model_file = match args.model { None => { let api = hf_hub::api::sync::Api::new()?; let api = api.model("vincent-espitalier/candle-eva2".into()); api.get("eva02_base_patch14_448.mim_in22k_ft_in22k_in1k_adapted.safetensors")? } Some(model) => model.into(), }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? }; let model = eva2::vit_base(vb)?; println!("model built"); let logits = model.forward(&image.unsqueeze(0)?)?; let prs = candle_nn::ops::softmax(&logits, D::Minus1)? .i(0)? .to_vec1::<f32>()?; let mut prs = prs.iter().enumerate().collect::<Vec<_>>(); prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1)); for &(category_idx, pr) in prs.iter().take(5) { println!( "{:24}: {:.2}%", candle_examples::imagenet::CLASSES[category_idx], 100. * pr ); } Ok(()) }
candle/candle-examples/examples/eva2/main.rs/0
{ "file_path": "candle/candle-examples/examples/eva2/main.rs", "repo_id": "candle", "token_count": 1221 }
26
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use clap::{Parser, ValueEnum}; use candle::{DType, IndexOp, D}; use candle_nn::{Module, VarBuilder}; use candle_transformers::models::hiera; #[derive(Clone, Copy, Debug, ValueEnum)] enum Which { Tiny, Small, Base, BasePlus, Large, Huge, } impl Which { fn model_filename(&self) -> String { let name = match self { Self::Tiny => "tiny", Self::Small => "small", Self::Base => "base", Self::BasePlus => "base_plus", Self::Large => "large", Self::Huge => "huge", }; format!("timm/hiera_{}_224.mae_in1k_ft_in1k", name) } fn config(&self) -> hiera::Config { match self { Self::Tiny => hiera::Config::tiny(), Self::Small => hiera::Config::small(), Self::Base => hiera::Config::base(), Self::BasePlus => hiera::Config::base_plus(), Self::Large => hiera::Config::large(), Self::Huge => hiera::Config::huge(), } } } #[derive(Parser)] struct Args { #[arg(long)] model: Option<String>, #[arg(long)] image: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, #[arg(value_enum, long, default_value_t=Which::Tiny)] which: Which, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let image = candle_examples::imagenet::load_image224(args.image)?.to_device(&device)?; println!("loaded image {image:?}"); let model_file = match args.model { None => { let model_name = args.which.model_filename(); let api = hf_hub::api::sync::Api::new()?; let api = api.model(model_name); api.get("model.safetensors")? } Some(model) => model.into(), }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? }; let model = hiera::hiera(&args.which.config(), 1000, vb)?; println!("model built"); let logits = model.forward(&image.unsqueeze(0)?)?; let prs = candle_nn::ops::softmax(&logits, D::Minus1)? .i(0)? .to_vec1::<f32>()?; let mut prs = prs.iter().enumerate().collect::<Vec<_>>(); prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1)); for &(category_idx, pr) in prs.iter().take(5) { println!( "{:24}: {:.2}%", candle_examples::imagenet::CLASSES[category_idx], 100. * pr ); } Ok(()) }
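// An example invocation (the image path is a placeholder):
//
//   cargo run --example hiera --release -- --image bike.jpg --which base
//
// --which selects one of the clap variants above (tiny, small, base, base-plus,
// large, huge); the weights are fetched from the matching timm repository on the
// Hugging Face hub unless --model points at a local safetensors file.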
candle/candle-examples/examples/hiera/main.rs/0
{ "file_path": "candle/candle-examples/examples/hiera/main.rs", "repo_id": "candle", "token_count": 1257 }
27
# candle-mamba: Mamba implementation

Candle implementation of *Mamba* [1], inference only. Mamba is an alternative to
the transformer architecture: it leverages State Space Models (SSMs) with the
goal of being computationally efficient on long sequences. The implementation is
based on [mamba.rs](https://github.com/LaurentMazare/mamba.rs).

- [1]. [Mamba: Linear-Time Sequence Modeling with Selective State Spaces](https://arxiv.org/abs/2312.00752).

Compared to the mamba-minimal example, this version is far more efficient but
only supports inference.

## Running the example

```bash
$ cargo run --example mamba --release -- --prompt "Mamba is the"
```
candle/candle-examples/examples/mamba/README.md/0
{ "file_path": "candle/candle-examples/examples/mamba/README.md", "repo_id": "candle", "token_count": 190 }
28
# candle-mobileone [MobileOne: An Improved One millisecond Mobile Backbone](https://arxiv.org/abs/2206.04040). This candle implementation uses a pre-trained MobileOne network for inference. The classification head has been trained on the ImageNet dataset and returns the probabilities for the top-5 classes. ## Running an example ``` $ cargo run --example mobileone --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg --which s2 loaded image Tensor[dims 3, 224, 224; f32] model built mountain bike, all-terrain bike, off-roader: 79.33% bicycle-built-for-two, tandem bicycle, tandem: 15.32% crash helmet : 2.58% unicycle, monocycle : 1.70% alp : 0.21% ```
candle/candle-examples/examples/mobileone/README.md/0
{ "file_path": "candle/candle-examples/examples/mobileone/README.md", "repo_id": "candle", "token_count": 254 }
29
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;

#[cfg(feature = "accelerate")]
extern crate accelerate_src;

use clap::{Parser, ValueEnum};
use std::io::Write;
use tokenizers::Tokenizer;

use candle::quantized::gguf_file;
use candle::Tensor;
use candle_transformers::generation::{LogitsProcessor, Sampling};

use candle_examples::token_output_stream::TokenOutputStream;
use candle_transformers::models::quantized_llama::ModelWeights as Phi3b;
use candle_transformers::models::quantized_phi::ModelWeights as Phi2;
use candle_transformers::models::quantized_phi3::ModelWeights as Phi3;

const DEFAULT_PROMPT: &str = "Write a function to count prime numbers up to N. ";

#[derive(Clone, Debug, Copy, PartialEq, Eq, ValueEnum)]
enum Which {
    #[value(name = "phi-2")]
    Phi2,
    #[value(name = "phi-3")]
    Phi3,
    /// Alternative implementation of phi-3, based on llama.
    #[value(name = "phi-3b")]
    Phi3b,
}

#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
    /// GGUF file to load, typically a .gguf file generated by the quantize command from llama.cpp
    #[arg(long)]
    model: Option<String>,

    /// The initial prompt.
    #[arg(long)]
    prompt: Option<String>,

    /// The length of the sample to generate (in tokens).
    #[arg(short = 'n', long, default_value_t = 1000)]
    sample_len: usize,

    /// The tokenizer config in json format.
    #[arg(long)]
    tokenizer: Option<String>,

    /// The temperature used to generate samples, use 0 for greedy sampling.
    #[arg(long, default_value_t = 0.8)]
    temperature: f64,

    /// Nucleus sampling probability cutoff.
    #[arg(long)]
    top_p: Option<f64>,

    /// Only sample among the top K samples.
    #[arg(long)]
    top_k: Option<usize>,

    /// The seed to use when generating random samples.
    #[arg(long, default_value_t = 299792458)]
    seed: u64,

    /// Enable tracing (generates a trace-timestamp.json file).
    #[arg(long)]
    tracing: bool,

    /// Process prompt elements separately.
    #[arg(long)]
    split_prompt: bool,

    /// Run on CPU rather than GPU even if a GPU is available.
    #[arg(long)]
    cpu: bool,

    /// Penalty to be applied for repeating tokens, 1. means no penalty.
    #[arg(long, default_value_t = 1.1)]
    repeat_penalty: f32,

    /// The context size to consider for the repeat penalty.
    #[arg(long, default_value_t = 64)]
    repeat_last_n: usize,

    /// The model variant to use.
    #[arg(long, default_value = "phi-3b")]
    which: Which,

    #[arg(long)]
    use_flash_attn: bool,
}

impl Args {
    fn tokenizer(&self) -> anyhow::Result<Tokenizer> {
        let tokenizer_path = match &self.tokenizer {
            Some(config) => std::path::PathBuf::from(config),
            None => {
                let api = hf_hub::api::sync::Api::new()?;
                let repo = match self.which {
                    Which::Phi2 => "microsoft/phi-2",
                    Which::Phi3 | Which::Phi3b => "microsoft/Phi-3-mini-4k-instruct",
                };
                let api = api.model(repo.to_string());
                api.get("tokenizer.json")?
} }; Tokenizer::from_file(tokenizer_path).map_err(anyhow::Error::msg) } fn model(&self) -> anyhow::Result<std::path::PathBuf> { let model_path = match &self.model { Some(config) => std::path::PathBuf::from(config), None => { let (repo, filename, revision) = match self.which { Which::Phi2 => ("TheBloke/phi-2-GGUF", "phi-2.Q4_K_M.gguf", "main"), Which::Phi3 => ( "microsoft/Phi-3-mini-4k-instruct-gguf", "Phi-3-mini-4k-instruct-q4.gguf", "main", ), Which::Phi3b => ( "microsoft/Phi-3-mini-4k-instruct-gguf", "Phi-3-mini-4k-instruct-q4.gguf", "5eef2ce24766d31909c0b269fe90c817a8f263fb", ), }; let api = hf_hub::api::sync::Api::new()?; api.repo(hf_hub::Repo::with_revision( repo.to_string(), hf_hub::RepoType::Model, revision.to_string(), )) .get(filename)? } }; Ok(model_path) } } fn format_size(size_in_bytes: usize) -> String { if size_in_bytes < 1_000 { format!("{}B", size_in_bytes) } else if size_in_bytes < 1_000_000 { format!("{:.2}KB", size_in_bytes as f64 / 1e3) } else if size_in_bytes < 1_000_000_000 { format!("{:.2}MB", size_in_bytes as f64 / 1e6) } else { format!("{:.2}GB", size_in_bytes as f64 / 1e9) } } enum Model { Phi2(Phi2), Phi3(Phi3), Phi3b(Phi3b), } impl Model { fn forward(&mut self, xs: &Tensor, pos: usize) -> candle::Result<Tensor> { match self { Self::Phi2(m) => m.forward(xs, pos), Self::Phi3(m) => m.forward(xs, pos), Self::Phi3b(m) => m.forward(xs, pos), } } } fn main() -> anyhow::Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature, args.repeat_penalty, args.repeat_last_n ); let model_path = args.model()?; let mut file = std::fs::File::open(&model_path)?; let start = std::time::Instant::now(); let device = candle_examples::device(args.cpu)?; let mut model = { let model = gguf_file::Content::read(&mut file).map_err(|e| e.with_path(model_path))?; let mut total_size_in_bytes = 0; for (_, tensor) in model.tensor_infos.iter() { let elem_count = tensor.shape.elem_count(); total_size_in_bytes += elem_count * tensor.ggml_dtype.type_size() / tensor.ggml_dtype.block_size(); } println!( "loaded {:?} tensors ({}) in {:.2}s", model.tensor_infos.len(), &format_size(total_size_in_bytes), start.elapsed().as_secs_f32(), ); match args.which { Which::Phi2 => Model::Phi2(Phi2::from_gguf(model, &mut file, &device)?), Which::Phi3 => Model::Phi3(Phi3::from_gguf( args.use_flash_attn, model, &mut file, &device, )?), Which::Phi3b => Model::Phi3b(Phi3b::from_gguf(model, &mut file, &device)?), } }; println!("model built"); let tokenizer = args.tokenizer()?; let mut tos = TokenOutputStream::new(tokenizer); let prompt_str = args.prompt.unwrap_or_else(|| DEFAULT_PROMPT.to_string()); print!("{}", &prompt_str); let tokens = tos .tokenizer() .encode(prompt_str, true) .map_err(anyhow::Error::msg)?; let tokens = tokens.get_ids(); let to_sample = args.sample_len.saturating_sub(1); let mut all_tokens = vec![]; let mut logits_processor = { let temperature = args.temperature; let sampling = if temperature <= 0. 
{ Sampling::ArgMax } else { match (args.top_k, args.top_p) { (None, None) => Sampling::All { temperature }, (Some(k), None) => Sampling::TopK { k, temperature }, (None, Some(p)) => Sampling::TopP { p, temperature }, (Some(k), Some(p)) => Sampling::TopKThenTopP { k, p, temperature }, } }; LogitsProcessor::from_sampling(args.seed, sampling) }; let start_prompt_processing = std::time::Instant::now(); let mut next_token = if !args.split_prompt { let input = Tensor::new(tokens, &device)?.unsqueeze(0)?; let logits = model.forward(&input, 0)?; let logits = logits.squeeze(0)?; logits_processor.sample(&logits)? } else { let mut next_token = 0; for (pos, token) in tokens.iter().enumerate() { let input = Tensor::new(&[*token], &device)?.unsqueeze(0)?; let logits = model.forward(&input, pos)?; let logits = logits.squeeze(0)?; next_token = logits_processor.sample(&logits)? } next_token }; let prompt_dt = start_prompt_processing.elapsed(); all_tokens.push(next_token); if let Some(t) = tos.next_token(next_token)? { print!("{t}"); std::io::stdout().flush()?; } let eos_token = *tos .tokenizer() .get_vocab(true) .get("<|endoftext|>") .unwrap(); let start_post_prompt = std::time::Instant::now(); let mut sampled = 0; for index in 0..to_sample { let input = Tensor::new(&[next_token], &device)?.unsqueeze(0)?; let logits = model.forward(&input, tokens.len() + index)?; let logits = logits.squeeze(0)?; let logits = if args.repeat_penalty == 1. { logits } else { let start_at = all_tokens.len().saturating_sub(args.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, args.repeat_penalty, &all_tokens[start_at..], )? }; next_token = logits_processor.sample(&logits)?; all_tokens.push(next_token); if let Some(t) = tos.next_token(next_token)? { print!("{t}"); std::io::stdout().flush()?; } sampled += 1; if next_token == eos_token { break; }; } if let Some(rest) = tos.decode_rest().map_err(candle::Error::msg)? { print!("{rest}"); } std::io::stdout().flush()?; let dt = start_post_prompt.elapsed(); println!( "\n\n{:4} prompt tokens processed: {:.2} token/s", tokens.len(), tokens.len() as f64 / prompt_dt.as_secs_f64(), ); println!( "{sampled:4} tokens generated: {:.2} token/s", sampled as f64 / dt.as_secs_f64(), ); Ok(()) }
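// An example invocation (the prompt is only an illustration):
//
//   cargo run --example quantized-phi --release -- --which phi-3 --prompt "Explain RAII."
//
// This downloads the 4-bit gguf weights and the tokenizer from the hub, runs the
// prompt through the selected model and streams the generated tokens to stdout.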
candle/candle-examples/examples/quantized-phi/main.rs/0
{ "file_path": "candle/candle-examples/examples/quantized-phi/main.rs", "repo_id": "candle", "token_count": 5230 }
30
#![allow(unused)] //! Wrappers around the Python API of Gymnasium (the new version of OpenAI gym) use candle::{Device, Result, Tensor}; use pyo3::prelude::*; use pyo3::types::PyDict; /// The return value for a step. #[derive(Debug)] pub struct Step<A> { pub state: Tensor, pub action: A, pub reward: f64, pub terminated: bool, pub truncated: bool, } impl<A: Copy> Step<A> { /// Returns a copy of this step changing the observation tensor. pub fn copy_with_obs(&self, state: &Tensor) -> Step<A> { Step { state: state.clone(), action: self.action, reward: self.reward, terminated: self.terminated, truncated: self.truncated, } } } /// An OpenAI Gym session. pub struct GymEnv { env: PyObject, action_space: usize, observation_space: Vec<usize>, } fn w(res: PyErr) -> candle::Error { candle::Error::wrap(res) } impl GymEnv { /// Creates a new session of the specified OpenAI Gym environment. pub fn new(name: &str) -> Result<GymEnv> { Python::with_gil(|py| { let gym = py.import_bound("gymnasium")?; let make = gym.getattr("make")?; let env = make.call1((name,))?; let action_space = env.getattr("action_space")?; let action_space = if let Ok(val) = action_space.getattr("n") { val.extract()? } else { let action_space: Vec<usize> = action_space.getattr("shape")?.extract()?; action_space[0] }; let observation_space = env.getattr("observation_space")?; let observation_space = observation_space.getattr("shape")?.extract()?; Ok(GymEnv { env: env.into(), action_space, observation_space, }) }) .map_err(w) } /// Resets the environment, returning the observation tensor. pub fn reset(&self, seed: u64) -> Result<Tensor> { let state: Vec<f32> = Python::with_gil(|py| { let kwargs = PyDict::new_bound(py); kwargs.set_item("seed", seed)?; let state = self.env.call_method_bound(py, "reset", (), Some(&kwargs))?; state.bind(py).get_item(0)?.extract() }) .map_err(w)?; Tensor::new(state, &Device::Cpu) } /// Applies an environment step using the specified action. pub fn step<A: pyo3::IntoPy<pyo3::Py<pyo3::PyAny>> + Clone>( &self, action: A, ) -> Result<Step<A>> { let (state, reward, terminated, truncated) = Python::with_gil(|py| { let step = self .env .call_method_bound(py, "step", (action.clone(),), None)?; let step = step.bind(py); let state: Vec<f32> = step.get_item(0)?.extract()?; let reward: f64 = step.get_item(1)?.extract()?; let terminated: bool = step.get_item(2)?.extract()?; let truncated: bool = step.get_item(3)?.extract()?; Ok((state, reward, terminated, truncated)) }) .map_err(w)?; let state = Tensor::new(state, &Device::Cpu)?; Ok(Step { state, action, reward, terminated, truncated, }) } /// Returns the number of allowed actions for this environment. pub fn action_space(&self) -> usize { self.action_space } /// Returns the shape of the observation tensors. pub fn observation_space(&self) -> &[usize] { &self.observation_space } }
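// A minimal usage sketch (assumptions: the gymnasium Python package is installed
// and the "CartPole-v1" environment is available; action 0 is applied on every
// step purely for illustration):
//
// fn rollout() -> Result<()> {
//     let env = GymEnv::new("CartPole-v1")?;
//     let mut _obs = env.reset(42)?;
//     loop {
//         let step = env.step(0i64)?;
//         _obs = step.state;
//         if step.terminated || step.truncated {
//             break;
//         }
//     }
//     Ok(())
// }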
candle/candle-examples/examples/reinforcement-learning/gym_env.rs/0
{ "file_path": "candle/candle-examples/examples/reinforcement-learning/gym_env.rs", "repo_id": "candle", "token_count": 1752 }
31
# candle-segment-anything: Segment-Anything Model

This example is based on Meta AI [Segment-Anything
Model](https://github.com/facebookresearch/segment-anything). This model
provides a robust and fast image segmentation pipeline that can be tweaked via
some prompting (requesting some points to be in the target mask, requesting
some points to be part of the background so _not_ in the target mask,
specifying some bounding box).

The default backbone can be replaced by the smaller and faster TinyViT model
based on [MobileSAM](https://github.com/ChaoningZhang/MobileSAM).

## Running an example

```bash
cargo run --example segment-anything --release -- \
  --image candle-examples/examples/yolo-v8/assets/bike.jpg \
  --use-tiny \
  --point 0.6,0.6 --point 0.6,0.55
```

Running this command generates a `sam_merged.jpg` file containing the original
image with a blue overlay of the selected mask. The red dots represent the
prompt specified by `--point 0.6,0.6 --point 0.6,0.55`; this prompt is assumed
to be part of the target mask.

The values used for `--point` should be a comma-delimited pair of float values.
They are proportional to the image dimensions, i.e. use 0.5 for the image
center.

Original image:
![Leading group, Giro d'Italia 2021](../yolo-v8/assets/bike.jpg)

Segmentation results when prompting with a single point `--point 0.6,0.55`:
![Leading group, Giro d'Italia 2021](./assets/single_pt_prompt.jpg)

Segmentation results when prompting with multiple points `--point 0.6,0.6 --point 0.6,0.55`:
![Leading group, Giro d'Italia 2021](./assets/two_pt_prompt.jpg)

### Command-line flags

- `--use-tiny`: use the TinyViT based MobileSAM backbone rather than the default one.
- `--point`: specifies the location of the target points.
- `--threshold`: sets the threshold value to be part of the mask; a negative
  value results in a larger mask and can be specified via `--threshold=-1.2`.
candle/candle-examples/examples/segment-anything/README.md/0
{ "file_path": "candle/candle-examples/examples/segment-anything/README.md", "repo_id": "candle", "token_count": 573 }
32
use symphonia::core::audio::{AudioBufferRef, Signal};
use symphonia::core::codecs::{DecoderOptions, CODEC_TYPE_NULL};
use symphonia::core::conv::FromSample;

fn conv<T>(samples: &mut Vec<f32>, data: std::borrow::Cow<symphonia::core::audio::AudioBuffer<T>>)
where
    T: symphonia::core::sample::Sample,
    f32: symphonia::core::conv::FromSample<T>,
{
    samples.extend(data.chan(0).iter().map(|v| f32::from_sample(*v)))
}

pub(crate) fn pcm_decode<P: AsRef<std::path::Path>>(path: P) -> anyhow::Result<(Vec<f32>, u32)> {
    // Open the media source.
    let src = std::fs::File::open(path)?;

    // Create the media source stream.
    let mss = symphonia::core::io::MediaSourceStream::new(Box::new(src), Default::default());

    // Create a probe hint; no file extension or MIME type is registered here so probing
    // relies on the content itself.
    let hint = symphonia::core::probe::Hint::new();

    // Use the default options for metadata and format readers.
    let meta_opts: symphonia::core::meta::MetadataOptions = Default::default();
    let fmt_opts: symphonia::core::formats::FormatOptions = Default::default();

    // Probe the media source.
    let probed = symphonia::default::get_probe().format(&hint, mss, &fmt_opts, &meta_opts)?;
    // Get the instantiated format reader.
    let mut format = probed.format;

    // Find the first audio track with a known (decodable) codec.
    let track = format
        .tracks()
        .iter()
        .find(|t| t.codec_params.codec != CODEC_TYPE_NULL)
        .expect("no supported audio tracks");

    // Use the default options for the decoder.
    let dec_opts: DecoderOptions = Default::default();

    // Create a decoder for the track.
    let mut decoder = symphonia::default::get_codecs()
        .make(&track.codec_params, &dec_opts)
        .expect("unsupported codec");
    let track_id = track.id;
    let sample_rate = track.codec_params.sample_rate.unwrap_or(0);
    let mut pcm_data = Vec::new();
    // The decode loop.
    while let Ok(packet) = format.next_packet() {
        // Consume any new metadata that has been read since the last packet.
        while !format.metadata().is_latest() {
            format.metadata().pop();
        }

        // If the packet does not belong to the selected track, skip over it.
        if packet.track_id() != track_id {
            continue;
        }
        match decoder.decode(&packet)? {
            AudioBufferRef::F32(buf) => pcm_data.extend(buf.chan(0)),
            AudioBufferRef::U8(data) => conv(&mut pcm_data, data),
            AudioBufferRef::U16(data) => conv(&mut pcm_data, data),
            AudioBufferRef::U24(data) => conv(&mut pcm_data, data),
            AudioBufferRef::U32(data) => conv(&mut pcm_data, data),
            AudioBufferRef::S8(data) => conv(&mut pcm_data, data),
            AudioBufferRef::S16(data) => conv(&mut pcm_data, data),
            AudioBufferRef::S24(data) => conv(&mut pcm_data, data),
            AudioBufferRef::S32(data) => conv(&mut pcm_data, data),
            AudioBufferRef::F64(data) => conv(&mut pcm_data, data),
        }
    }
    Ok((pcm_data, sample_rate))
}
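// A minimal usage sketch (the path is a placeholder); note that no resampling is
// performed, the caller gets whatever sample rate is stored in the file:
//
// fn load_audio() -> anyhow::Result<()> {
//     let (pcm, sample_rate) = pcm_decode("samples/jfk.wav")?;
//     println!("decoded {} mono samples at {sample_rate} Hz", pcm.len());
//     Ok(())
// }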
candle/candle-examples/examples/whisper/pcm_decode.rs/0
{ "file_path": "candle/candle-examples/examples/whisper/pcm_decode.rs", "repo_id": "candle", "token_count": 1267 }
33
#include "kernels.h"
#include "kernel_helpers.h"
#include "flash_fwd_launch_template.h"

void run_mha_fwd(Flash_fwd_params &params, cudaStream_t stream) {
    FP16_SWITCH(!params.is_bf16, [&] {
        HEADDIM_SWITCH(params.d, [&] {
            BOOL_SWITCH(params.is_causal, Is_causal, [&] {
                run_mha_fwd_<elem_type, kHeadDim, Is_causal>(params, stream);
            });
        });
    });
}

extern "C" void run_mha(
    void *q_ptr,
    void *k_ptr,
    void *v_ptr,
    void *o_ptr,
    void *softmax_lse_ptr,
    void *alibi_slopes_ptr,

    int32_t *cu_seqlens_q_ptr,
    int32_t *cu_seqlens_k_ptr,

    uint32_t q_batch_stride,
    uint32_t k_batch_stride,
    uint32_t v_batch_stride,
    uint32_t o_batch_stride,
    uint32_t alibi_slopes_batch_stride,

    uint32_t q_row_stride,
    uint32_t k_row_stride,
    uint32_t v_row_stride,
    uint32_t o_row_stride,

    uint32_t q_head_stride,
    uint32_t k_head_stride,
    uint32_t v_head_stride,
    uint32_t o_head_stride,

    uint32_t b,
    uint32_t h,
    uint32_t h_k,
    uint32_t d,
    uint32_t d_rounded,
    float softmax_scale,

    uint32_t seqlen_q,
    uint32_t seqlen_k,
    uint32_t seqlen_q_rounded,
    uint32_t seqlen_k_rounded,

    int is_bf16,
    int is_causal,

    int window_size_left,
    int window_size_right
) {
    Flash_fwd_params params;
    // Reset the parameters.
    memset(&params, 0, sizeof(params));

    // Set the pointers and strides.
    params.q_ptr = q_ptr;
    params.k_ptr = k_ptr;
    params.v_ptr = v_ptr;
    params.o_ptr = o_ptr;
    params.softmax_lse_ptr = softmax_lse_ptr;
    params.alibi_slopes_ptr = alibi_slopes_ptr;
    // All strides are in elements, not bytes.
    params.q_batch_stride = q_batch_stride;
    params.k_batch_stride = k_batch_stride;
    params.v_batch_stride = v_batch_stride;
    params.o_batch_stride = o_batch_stride;
    params.alibi_slopes_batch_stride = alibi_slopes_batch_stride;
    params.q_row_stride = q_row_stride;
    params.k_row_stride = k_row_stride;
    params.v_row_stride = v_row_stride;
    params.o_row_stride = o_row_stride;
    params.q_head_stride = q_head_stride;
    params.k_head_stride = k_head_stride;
    params.v_head_stride = v_head_stride;
    params.o_head_stride = o_head_stride;

    // Set the dimensions.
    params.b = b;
    params.h = h;
    params.h_k = h_k;
    params.h_h_k_ratio = h / h_k;
    params.seqlen_q = seqlen_q;
    params.seqlen_k = seqlen_k;
    params.seqlen_q_rounded = seqlen_q_rounded;
    params.seqlen_k_rounded = seqlen_k_rounded;
    params.d = d;
    params.d_rounded = d_rounded;

    // Set the different scale values.
    params.scale_softmax = softmax_scale;
    params.scale_softmax_log2 = softmax_scale * M_LOG2E;

    params.p_dropout = 1.; // probability to keep
    params.p_dropout_in_uint8_t = uint8_t(std::floor(params.p_dropout * 255.0));
    params.rp_dropout = 1.f / params.p_dropout;
    params.scale_softmax_rp_dropout = params.rp_dropout * params.scale_softmax;
    params.is_bf16 = is_bf16;
    params.cu_seqlens_q = cu_seqlens_q_ptr;
    params.cu_seqlens_k = cu_seqlens_k_ptr;
    params.p_ptr = nullptr; // used for `return_softmax`.
    params.seqused_k = nullptr;

    params.is_causal = is_causal;
    params.window_size_left = window_size_left;
    params.window_size_right = window_size_right;

    params.is_seqlens_k_cumulative = true;
    params.num_splits = 1;

    cudaStream_t stream = 0; // Use the default stream.
    run_mha_fwd(params, stream);
}
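// Note: scale_softmax_log2 pre-multiplies the softmax scale by log2(e); this
// matches kernels that evaluate the softmax via exp2 rather than exp. p_dropout
// is stored as a keep-probability, so the 1.0 above disables dropout entirely.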
candle/candle-flash-attn/kernels/flash_api.cu/0
{ "file_path": "candle/candle-flash-attn/kernels/flash_api.cu", "repo_id": "candle", "token_count": 1650 }
34
[package] name = "candle-kernels" version = "0.6.1" edition = "2021" description = "CUDA kernels for Candle" repository = "https://github.com/huggingface/candle" keywords = ["blas", "tensor", "machine-learning"] categories = ["science"] license = "MIT OR Apache-2.0" [dependencies] [build-dependencies] bindgen_cuda = "0.1.1"
candle/candle-kernels/Cargo.toml/0
{ "file_path": "candle/candle-kernels/Cargo.toml", "repo_id": "candle", "token_count": 126 }
35
#include "cuda_utils.cuh" #include<stdint.h> #define WHERE_OP(TYPENAME, ID_TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t numel, \ const size_t num_dims, \ const size_t *info, \ const ID_TYPENAME *ids, \ const TYPENAME *t, \ const TYPENAME *f, \ TYPENAME *out \ ) { \ const size_t *dims = info; \ const size_t *strides = info + num_dims; \ const size_t *strides_t = info + 2*num_dims; \ const size_t *strides_f = info + 3*num_dims; \ if (is_contiguous(num_dims, dims, strides) \ && is_contiguous(num_dims, dims, strides_f) \ && is_contiguous(num_dims, dims, strides_t)) { \ for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \ out[i] = ids[i] ? t[i] : f[i]; \ } \ } \ else { \ for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \ unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \ unsigned strided_i_t = get_strided_index(i, num_dims, dims, strides_t); \ unsigned strided_i_f = get_strided_index(i, num_dims, dims, strides_f); \ out[i] = ids[strided_i] ? t[strided_i_t] : f[strided_i_f]; \ } \ } \ } \ #if __CUDA_ARCH__ >= 800 WHERE_OP(__nv_bfloat16, int64_t, where_i64_bf16) WHERE_OP(__nv_bfloat16, uint32_t, where_u32_bf16) WHERE_OP(__nv_bfloat16, uint8_t, where_u8_bf16) #endif #if __CUDA_ARCH__ >= 530 WHERE_OP(__half, int64_t, where_i64_f16) WHERE_OP(__half, uint32_t, where_u32_f16) WHERE_OP(__half, uint8_t, where_u8_f16) #endif WHERE_OP(float, int64_t, where_i64_f32) WHERE_OP(double, int64_t, where_i64_f64) WHERE_OP(uint8_t, int64_t, where_i64_u8) WHERE_OP(uint32_t, int64_t, where_i64_u32) WHERE_OP(int64_t, int64_t, where_i64_i64) WHERE_OP(float, uint32_t, where_u32_f32) WHERE_OP(double, uint32_t, where_u32_f64) WHERE_OP(uint8_t, uint32_t, where_u32_u8) WHERE_OP(uint32_t, uint32_t, where_u32_u32) WHERE_OP(int64_t, uint32_t, where_u32_i64) WHERE_OP(float, uint8_t, where_u8_f32) WHERE_OP(double, uint8_t, where_u8_f64) WHERE_OP(uint8_t, uint8_t, where_u8_u8) WHERE_OP(uint32_t, uint8_t, where_u8_u32) WHERE_OP(int64_t, uint8_t, where_u8_i64)
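// For reference, an instantiation such as WHERE_OP(float, uint8_t, where_u8_f32)
// expands to a kernel taking a u8 condition buffer `ids` and two f32 operands
// `t`/`f`. When all the layouts are contiguous the fast path above performs a
// plain elementwise select; otherwise every index goes through get_strided_index.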
candle/candle-kernels/src/ternary.cu/0
{ "file_path": "candle/candle-kernels/src/ternary.cu", "repo_id": "candle", "token_count": 1159 }
36
use super::*; use half::{bf16, f16}; use metal::MTLResourceOptions; fn read_to_vec<T: Clone>(buffer: &Buffer, n: usize) -> Vec<T> { let ptr = buffer.contents() as *const T; assert!(!ptr.is_null()); let slice = unsafe { std::slice::from_raw_parts(ptr, n) }; slice.to_vec() } fn new_buffer<T>(device: &Device, data: &[T]) -> Buffer { let options = MTLResourceOptions::StorageModeManaged; let ptr = data.as_ptr() as *const c_void; let size = std::mem::size_of_val(data) as u64; device.new_buffer_with_data(ptr, size, options) } fn device() -> Device { Device::system_default().unwrap() } fn approx(v: Vec<f32>, digits: i32) -> Vec<f32> { let b = 10f32.powi(digits); v.iter().map(|t| f32::round(t * b) / b).collect() } fn approx_f16(v: Vec<f16>, digits: i32) -> Vec<f32> { let b = 10f32.powi(digits); v.iter().map(|t| f32::round(t.to_f32() * b) / b).collect() } fn approx_bf16(v: Vec<bf16>, digits: i32) -> Vec<f32> { let b = 10f32.powi(digits); v.iter().map(|t| f32::round(t.to_f32() * b) / b).collect() } fn run<T: Clone>(v: &[T], name: unary::contiguous::Kernel) -> Vec<T> { let device = device(); let kernels = Kernels::new(); let command_queue = device.new_command_queue(); let command_buffer = command_queue.new_command_buffer(); let input = new_buffer(&device, v); let input = BufferOffset { buffer: &input, offset_in_bytes: 0, }; let output = new_buffer(&device, v); call_unary_contiguous( &device, command_buffer, &kernels, name, v.len(), input, &output, ) .unwrap(); command_buffer.commit(); command_buffer.wait_until_completed(); read_to_vec(&output, v.len()) } fn run_binary<T: Clone>(x: &[T], y: &[T], name: binary::contiguous::Kernel) -> Vec<T> { let device = device(); let kernels = Kernels::new(); let command_queue = device.new_command_queue(); let command_buffer = command_queue.new_command_buffer(); let options = MTLResourceOptions::StorageModeManaged; let left = new_buffer(&device, x); let right = new_buffer(&device, y); let output = device.new_buffer(std::mem::size_of_val(x) as u64, options); call_binary_contiguous( &device, command_buffer, &kernels, name, x.len(), BufferOffset::zero_offset(&left), BufferOffset::zero_offset(&right), &output, ) .unwrap(); command_buffer.commit(); command_buffer.wait_until_completed(); read_to_vec(&output, x.len()) } fn run_strided<T: Clone>( v: &[T], kernel: unary::strided::Kernel, shape: &[usize], strides: &[usize], offset: usize, ) -> Vec<T> { let device = device(); let command_queue = device.new_command_queue(); let command_buffer = command_queue.new_command_buffer(); let input = new_buffer(&device, v); let input = BufferOffset { buffer: &input, offset_in_bytes: offset, }; let output_b = new_buffer(&device, v); let output = BufferOffset { buffer: &output_b, offset_in_bytes: 0, }; let kernels = Kernels::new(); call_unary_strided( &device, command_buffer, &kernels, kernel, shape, input, strides, output, ) .unwrap(); command_buffer.commit(); command_buffer.wait_until_completed(); read_to_vec(&output_b, v.len()) } #[test] fn cos_f32() { let v = vec![1.0f32, 2.0, 3.0]; let results = run(&v, unary::contiguous::cos::FLOAT); let expected: Vec<_> = v.iter().map(|v| v.cos()).collect(); assert_eq!(approx(results, 4), vec![0.5403, -0.4161, -0.99]); assert_eq!(approx(expected, 4), vec![0.5403, -0.4161, -0.99]); let v = vec![1.0f32; 10_000]; let results = run(&v, unary::contiguous::cos::FLOAT); let expected: Vec<_> = v.iter().map(|v| v.cos()).collect(); assert_eq!(approx(results, 4), vec![0.5403; 10_000]); assert_eq!(approx(expected, 4), vec![0.5403; 10_000]); } #[test] fn 
cos_f32_strided() { let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0]; let shape = vec![6]; let strides = vec![1]; let offset = 0; let results = run_strided(&v, unary::strided::cos::FLOAT, &shape, &strides, offset); let expected: Vec<_> = v.iter().map(|v| v.cos()).collect(); assert_eq!( approx(results, 4), vec![0.5403, -0.4161, -0.99, -0.6536, 0.2837, 0.9602] ); assert_eq!( approx(expected, 4), vec![0.5403, -0.4161, -0.99, -0.6536, 0.2837, 0.9602] ); // Contiguous let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0]; let shape = vec![3, 2]; let strides = vec![2, 1]; let offset = 0; let results = run_strided(&v, unary::strided::cos::FLOAT, &shape, &strides, offset); let expected: Vec<_> = v.iter().map(|v| v.cos()).collect(); assert_eq!( approx(results, 4), vec![0.5403, -0.4161, -0.99, -0.6536, 0.2837, 0.9602] ); assert_eq!( approx(expected, 4), vec![0.5403, -0.4161, -0.99, -0.6536, 0.2837, 0.9602] ); // Transposed let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0]; let shape = vec![3, 2]; let strides = vec![1, 3]; let offset = 0; let results = run_strided(&v, unary::strided::cos::FLOAT, &shape, &strides, offset); let expected: Vec<_> = v.iter().map(|v| v.cos()).collect(); assert_eq!( approx(results, 4), vec![0.5403, -0.6536, -0.4161, 0.2837, -0.99, 0.9602] ); assert_eq!( approx(expected, 4), vec![0.5403, -0.4161, -0.99, -0.6536, 0.2837, 0.9602] ); // Very large let v = vec![1.0f32; 10_000]; let shape = vec![2, 5_000]; let strides = vec![2, 1]; let offset = 0; let results = run_strided(&v, unary::strided::cos::FLOAT, &shape, &strides, offset); let expected: Vec<_> = v.iter().map(|v| v.cos()).collect(); assert_eq!(approx(results, 4), vec![0.5403; 10_000]); assert_eq!(approx(expected, 4), vec![0.5403; 10_000]); } #[test] fn cos_strided_random() { let v: Vec<_> = (0..10_000).map(|_| rand::random::<f32>()).collect(); let shape = vec![5_000, 2]; let strides = vec![1, 5_000]; let offset = 0; let results = run_strided(&v, unary::strided::cos::FLOAT, &shape, &strides, offset); let expected: Vec<_> = v.iter().map(|v| v.cos()).collect(); assert_eq!(approx(vec![results[0]], 4), approx(vec![expected[0]], 4)); assert_eq!( approx(vec![results[1]], 4), approx(vec![expected[5_000]], 4) ); assert_eq!(approx(vec![results[2]], 4), approx(vec![expected[1]], 4)); assert_eq!( approx(vec![results[3]], 4), approx(vec![expected[5_001]], 4) ); assert_eq!( approx(vec![results[5_000]], 4), approx(vec![expected[2_500]], 4) ); } #[test] fn gelu_f16() { let v: Vec<f16> = [-10f32, -1.0, 0., 1., 2., 3., 10.0, 20.0] .iter() .map(|v| f16::from_f32(*v)) .collect(); let expected: Vec<f32> = vec![-0.0, -0.16, 0.0, 0.84, 1.96, 3.0, 10.0, 20.0]; let results = run(&v, unary::contiguous::gelu::HALF); assert_eq!(approx_f16(results, 2), expected); } #[test] fn gelu_f32() { let v: Vec<f32> = vec![-10f32, -1.0, 0., 1., 2., 3., 10.0, 20.0]; let expected: Vec<f32> = vec![-0.0, -0.159, 0.0, 0.841, 1.955, 2.996, 10.0, 20.0]; let results = run(&v, unary::contiguous::gelu::FLOAT); assert_eq!(approx(results, 3), expected); } #[test] fn silu_f16() { let v: Vec<f16> = [-10f32, -1.0, 0., 1., 2., 3., 10.0, 20.0] .iter() .map(|v| f16::from_f32(*v)) .collect(); let expected: Vec<f32> = vec![-0.0, -0.27, 0.0, 0.73, 1.76, 2.86, 10.0, 20.0]; let results = run(&v, unary::contiguous::silu::HALF); assert_eq!(approx_f16(results, 2), expected); } #[test] fn silu_f32() { let v: Vec<f32> = vec![-10f32, -1.0, 0., 1., 2., 3., 10.0, 20.0]; let expected: Vec<f32> = vec![-0.0, -0.269, 0.0, 0.731, 1.762, 2.858, 10.0, 20.0]; let results = run(&v, unary::contiguous::silu::FLOAT); 
assert_eq!(approx(results, 3), expected); } #[test] fn binary_add_f32() { let left = vec![1.0f32, 2.0, 3.0]; let right = vec![2.0f32, 3.1, 4.2]; let results = run_binary(&left, &right, binary::contiguous::add::FLOAT); let expected: Vec<_> = left .iter() .zip(right.iter()) .map(|(&x, &y)| x + y) .collect(); assert_eq!(approx(results, 4), vec![3.0f32, 5.1, 7.2]); assert_eq!(approx(expected, 4), vec![3.0f32, 5.1, 7.2]); } #[test] fn binary_ops_bf16() { let lhs: Vec<bf16> = [1.1f32, 2.2, 3.3].into_iter().map(bf16::from_f32).collect(); let rhs: Vec<bf16> = [4.2f32, 5.5f32, 6.91f32] .into_iter() .map(bf16::from_f32) .collect(); macro_rules! binary_op { ($opname:ident, $opexpr:expr) => {{ let results = run_binary(&lhs, &rhs, binary::contiguous::$opname::BFLOAT); let expected: Vec<bf16> = lhs .iter() .zip(rhs.iter()) .map(|(x, y): (&bf16, &bf16)| $opexpr(*x, *y)) .collect(); assert_eq!(results, expected); }}; } binary_op!(add, |x, y| x + y); binary_op!(sub, |x, y| x - y); binary_op!(mul, |x, y| x * y); binary_op!(div, |x, y| x / y); binary_op!(min, |x: bf16, y| x.min(y)); binary_op!(max, |x: bf16, y| x.max(y)); } fn run_cast<T: Clone, U: Clone>(v: &[T], name: &'static str) -> Vec<U> { let device = device(); let kernels = Kernels::new(); let command_queue = device.new_command_queue(); let command_buffer = command_queue.new_command_buffer(); let input = new_buffer(&device, v); let options = MTLResourceOptions::StorageModeManaged; let size = (v.len() * std::mem::size_of::<U>()) as u64; let output = device.new_buffer(size, options); call_cast_contiguous( &device, command_buffer, &kernels, name, v.len(), BufferOffset::zero_offset(&input), &output, ) .unwrap(); command_buffer.commit(); command_buffer.wait_until_completed(); read_to_vec(&output, v.len()) } #[test] fn cast_f32() { let v_f64 = vec![1.0f64, 2.0, 3.0]; let v_f32: Vec<f32> = v_f64.iter().map(|&v| v as f32).collect(); let v_f16: Vec<f16> = v_f64.iter().map(|&v| f16::from_f32(v as f32)).collect(); let v_bf16: Vec<bf16> = v_f64.iter().map(|&v| bf16::from_f32(v as f32)).collect(); let v_u32: Vec<u32> = v_f64.iter().map(|&v| v as u32).collect(); let v_u8: Vec<u8> = v_f64.iter().map(|&v| v as u8).collect(); let v_i64: Vec<i64> = v_f64.iter().map(|&v| v as i64).collect(); // f32 -> f16 let results: Vec<half::f16> = run_cast(&v_f32, "cast_f32_f16"); assert_eq!(results, v_f16); // f32 -> bf16 let results: Vec<bf16> = run_cast(&v_f32, "cast_f32_bf16"); assert_eq!(results, v_bf16); // f32 -> u32 let results: Vec<u32> = run_cast(&v_f32, "cast_f32_u32"); assert_eq!(results, v_u32); // f32 -> u8 let results: Vec<u8> = run_cast(&v_f32, "cast_f32_u8"); assert_eq!(results, v_u8); // f32 -> i64 let results: Vec<i64> = run_cast(&v_f32, "cast_f32_i64"); assert_eq!(results, v_i64); } #[test] fn cast_f16() { let v_f64 = vec![1.0f64, 2.0, 3.0]; let v_f32: Vec<f32> = v_f64.iter().map(|&v| v as f32).collect(); let v_f16: Vec<f16> = v_f64.iter().map(|&v| f16::from_f32(v as f32)).collect(); let v_bf16: Vec<bf16> = v_f64.iter().map(|&v| bf16::from_f32(v as f32)).collect(); let v_u32: Vec<u32> = v_f64.iter().map(|&v| v as u32).collect(); let v_u8: Vec<u8> = v_f64.iter().map(|&v| v as u8).collect(); let v_i64: Vec<i64> = v_f64.iter().map(|&v| v as i64).collect(); // f16 -> f32 let results: Vec<f32> = run_cast(&v_f16, "cast_f16_f32"); assert_eq!(results, v_f32); // f16 -> bf16 let results: Vec<bf16> = run_cast(&v_f16, "cast_f16_bf16"); assert_eq!(results, v_bf16); // f16 -> u32 let results: Vec<u32> = run_cast(&v_f16, "cast_f16_u32"); assert_eq!(results, v_u32); // f16 -> 
u8 let results: Vec<u8> = run_cast(&v_f16, "cast_f16_u8"); assert_eq!(results, v_u8); // f16 -> i64 let results: Vec<i64> = run_cast(&v_f16, "cast_f16_i64"); assert_eq!(results, v_i64); } #[test] fn cast_bf16() { let v_f64 = vec![1.0f64, 2.0, 3.0]; let v_f32: Vec<f32> = v_f64.iter().map(|&v| v as f32).collect(); let v_f16: Vec<f16> = v_f64.iter().map(|&v| f16::from_f32(v as f32)).collect(); let v_bf16: Vec<bf16> = v_f64.iter().map(|&v| bf16::from_f32(v as f32)).collect(); let v_u32: Vec<u32> = v_f64.iter().map(|&v| v as u32).collect(); let v_u8: Vec<u8> = v_f64.iter().map(|&v| v as u8).collect(); let v_i64: Vec<i64> = v_f64.iter().map(|&v| v as i64).collect(); // bf16 -> f32 let results: Vec<f32> = run_cast(&v_bf16, "cast_bf16_f32"); assert_eq!(results, v_f32); // bf16 -> f16 let results: Vec<f16> = run_cast(&v_bf16, "cast_bf16_f16"); assert_eq!(results, v_f16); // bf16 -> u32 let results: Vec<u32> = run_cast(&v_bf16, "cast_bf16_u32"); assert_eq!(results, v_u32); // bf16 -> u8 let results: Vec<u8> = run_cast(&v_bf16, "cast_bf16_u8"); assert_eq!(results, v_u8); // bf16 -> i64 let results: Vec<i64> = run_cast(&v_bf16, "cast_bf16_i64"); assert_eq!(results, v_i64); } #[test] fn cast_u32() { let v_f64 = vec![1.0f64, 2.0, 3.0]; let v_f32: Vec<f32> = v_f64.iter().map(|&v| v as f32).collect(); let v_f16: Vec<f16> = v_f64.iter().map(|&v| f16::from_f32(v as f32)).collect(); let v_bf16: Vec<bf16> = v_f64.iter().map(|&v| bf16::from_f32(v as f32)).collect(); let v_u32: Vec<u32> = v_f64.iter().map(|&v| v as u32).collect(); let v_u8: Vec<u8> = v_f64.iter().map(|&v| v as u8).collect(); let v_i64: Vec<i64> = v_f64.iter().map(|&v| v as i64).collect(); // u32 -> f32 let results: Vec<f32> = run_cast(&v_u32, "cast_u32_f32"); assert_eq!(results, v_f32); // u32 -> f16 let results: Vec<f16> = run_cast(&v_u32, "cast_u32_f16"); assert_eq!(results, v_f16); // u32 -> bf16 let results: Vec<bf16> = run_cast(&v_u32, "cast_u32_bf16"); assert_eq!(results, v_bf16); // u32 -> u8 let results: Vec<u8> = run_cast(&v_u32, "cast_u32_u8"); assert_eq!(results, v_u8); // u32 -> i64 let results: Vec<i64> = run_cast(&v_u32, "cast_u32_i64"); assert_eq!(results, v_i64); } #[test] fn cast_u8() { let v_f64 = vec![1.0f64, 2.0, 3.0]; let v_f32: Vec<f32> = v_f64.iter().map(|&v| v as f32).collect(); let v_f16: Vec<f16> = v_f64.iter().map(|&v| f16::from_f32(v as f32)).collect(); let v_bf16: Vec<bf16> = v_f64.iter().map(|&v| bf16::from_f32(v as f32)).collect(); let v_u32: Vec<u32> = v_f64.iter().map(|&v| v as u32).collect(); let v_u8: Vec<u8> = v_f64.iter().map(|&v| v as u8).collect(); let v_i64: Vec<i64> = v_f64.iter().map(|&v| v as i64).collect(); // u8 -> f32 let results: Vec<f32> = run_cast(&v_u8, "cast_u8_f32"); assert_eq!(results, v_f32); // u8 -> f16 let results: Vec<f16> = run_cast(&v_u8, "cast_u8_f16"); assert_eq!(results, v_f16); // u8 -> bf16 let results: Vec<bf16> = run_cast(&v_u8, "cast_u8_bf16"); assert_eq!(results, v_bf16); // u8 -> u32 let results: Vec<u32> = run_cast(&v_u8, "cast_u8_u32"); assert_eq!(results, v_u32); // u8 -> i64 let results: Vec<i64> = run_cast(&v_u8, "cast_u8_i64"); assert_eq!(results, v_i64); } #[test] fn cast_i64() { let v_f64 = vec![1.0f64, 2.0, 3.0]; let v_f32: Vec<f32> = v_f64.iter().map(|&v| v as f32).collect(); let v_f16: Vec<f16> = v_f64.iter().map(|&v| f16::from_f32(v as f32)).collect(); let v_bf16: Vec<bf16> = v_f64.iter().map(|&v| bf16::from_f32(v as f32)).collect(); let v_u32: Vec<u32> = v_f64.iter().map(|&v| v as u32).collect(); let v_u8: Vec<u8> = v_f64.iter().map(|&v| v as u8).collect(); let 
v_i64: Vec<i64> = v_f64.iter().map(|&v| v as i64).collect(); // i64 -> f32 let results: Vec<f32> = run_cast(&v_i64, "cast_i64_f32"); assert_eq!(results, v_f32); // i64 -> f16 let results: Vec<f16> = run_cast(&v_i64, "cast_i64_f16"); assert_eq!(results, v_f16); // i64 -> bf16 let results: Vec<bf16> = run_cast(&v_i64, "cast_i64_bf16"); assert_eq!(results, v_bf16); // i64 -> u32 let results: Vec<u32> = run_cast(&v_i64, "cast_i64_u32"); assert_eq!(results, v_u32); // i64 -> u8 let results: Vec<u8> = run_cast(&v_i64, "cast_i64_u8"); assert_eq!(results, v_u8); } fn run_affine<T: Clone>(v: &[T], mul: f64, add: f64) -> Vec<T> { let device = device(); let kernels = Kernels::new(); let command_queue = device.new_command_queue(); let command_buffer = command_queue.new_command_buffer(); let input = new_buffer(&device, v); let output = new_buffer(&device, v); let size = v.len(); call_affine( &device, command_buffer, &kernels, "affine_f32", size, BufferOffset::zero_offset(&input), &output, mul as f32, add as f32, ) .unwrap(); command_buffer.commit(); command_buffer.wait_until_completed(); read_to_vec(&output, v.len()) } fn run_affine_strided<T: Clone>( v: &[T], shape: &[usize], strides: &[usize], mul: f64, add: f64, ) -> Vec<T> { let device = device(); let kernels = Kernels::new(); let command_queue = device.new_command_queue(); let command_buffer = command_queue.new_command_buffer(); let input = new_buffer(&device, v); let output = new_buffer(&device, v); call_affine_strided( &device, command_buffer, &kernels, "affine_f32_strided", shape, BufferOffset::zero_offset(&input), strides, &output, mul as f32, add as f32, ) .unwrap(); command_buffer.commit(); command_buffer.wait_until_completed(); let len: usize = shape.iter().product(); read_to_vec(&output, len) } #[test] fn affine() { let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; let mul = 1.5; let add = 1.1; let result = run_affine(&input, mul, add); assert_eq!(result, vec![2.6, 4.1, 5.6, 7.1, 8.6, 10.1, 11.6, 13.1]); let input = [1.0f32; 40_000]; let mul = 1.5; let add = 1.1; let result = run_affine(&input, mul, add); assert_eq!(result, vec![2.6; 40_000]); } #[test] fn affine_strided() { let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; let mul = 1.5; let add = 1.1; let shape = [4]; let strides = [2]; let result = run_affine_strided(&input, &shape, &strides, mul, add); // 1 on 2 assert_eq!(result, vec![2.6, 5.6, 8.6, 11.6]); } #[test] fn index_select() { let embedding = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]; let shape = [5, 2]; let stride = [2, 1]; let ids = [0u32, 4, 2]; let dim = 0; let result = run_index_select(&embedding, &shape, &stride, &ids, dim, "is_u32_f32"); assert_eq!(result, vec![1.0f32, 2.0, 9.0, 10.0, 5.0, 6.0]); let embedding = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]; let shape = [2, 5]; let stride = [1, 2]; let ids = [0u32, 1, 0]; let dim = 0; let result = run_index_select(&embedding, &shape, &stride, &ids, dim, "is_u32_f32"); assert_eq!( result, vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 1.0f32, 2.0, 3.0, 4.0, 5.0] ); } #[test] fn index_select_strided() { let embedding = (0..16).map(|x| x as f32).collect::<Vec<_>>(); let shape = [2, 2]; let stride = [2, 4]; let ids = [0u32]; let dim = 0; let result = run_index_select_strided(&embedding, &shape, &stride, &ids, dim, "is_u32_f32"); assert_eq!(result, vec![0.0, 4.0]); } #[test] fn index_select_f16() { let embedding: Vec<_> = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0] .into_iter() .map(f16::from_f32) .collect(); let 
shape = [5, 2]; let stride = [2, 1]; let ids = [0u32, 4, 2]; let dim = 0; let result = run_index_select(&embedding, &shape, &stride, &ids, dim, "is_u32_f16"); assert_eq!( approx_f16(result, 4), vec![1.0f32, 2.0, 9.0, 10.0, 5.0, 6.0] ); } #[test] fn index_select_is_u32_bf16() { let embedding: Vec<bf16> = (1..=10).map(|x| bf16::from_f32(x as f32)).collect(); let shape = [5, 2]; let stride = [2, 1]; let ids = [0u32, 4, 2]; let dim = 0; let result = run_index_select(&embedding, &shape, &stride, &ids, dim, "is_u32_bf16"); assert_eq!( approx_bf16(result, 4), vec![1.0f32, 2.0, 9.0, 10.0, 5.0, 6.0] ); } #[test] fn index_select_is_u8_bf16() { let embedding: Vec<bf16> = (1..=10).map(|x| bf16::from_f32(x as f32)).collect(); let shape = [5, 2]; let stride = [2, 1]; let ids = [0u8, 4, 2]; let dim = 0; let result = run_index_select(&embedding, &shape, &stride, &ids, dim, "is_u8_bf16"); assert_eq!( approx_bf16(result, 4), vec![1.0f32, 2.0, 9.0, 10.0, 5.0, 6.0] ); } #[test] fn index_select_dim1() { let embedding = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]; let shape = [5, 2]; let stride = [2, 1]; let ids = [0u32, 1, 0]; let dim = 1; let result = run_index_select(&embedding, &shape, &stride, &ids, dim, "is_u32_f32"); assert_eq!( result, vec![1.0f32, 2.0, 1.0, 3.0, 4.0, 3.0, 5.0, 6.0, 5.0, 7.0, 8.0f32, 7.0, 9.0, 10.0, 9.0] ); } fn run_index_select<T: Clone, I: Clone + std::fmt::Debug>( embeddings: &[T], shape: &[usize], stride: &[usize], ids: &[I], dim: usize, name: &'static str, ) -> Vec<T> { let device = Device::system_default().expect("no device found"); let command_queue = device.new_command_queue(); let command_buffer = command_queue.new_command_buffer(); let embeddings_buffer = new_buffer(&device, embeddings); let ids_buffer = new_buffer(&device, ids); let left_size: usize = shape[..dim].iter().product(); let right_size: usize = shape[dim + 1..].iter().product(); let dst_el = ids.len() * left_size * right_size; let dst_buffer = new_buffer(&device, &vec![0.0f32; dst_el]); let kernels = Kernels::new(); call_index_select( &device, command_buffer, &kernels, name, shape, ids.len(), dim, true, shape, stride, BufferOffset::zero_offset(&embeddings_buffer), BufferOffset::zero_offset(&ids_buffer), &dst_buffer, ) .unwrap(); command_buffer.commit(); command_buffer.wait_until_completed(); read_to_vec(&dst_buffer, dst_el) } fn run_index_select_strided<T: Clone, I: Clone + std::fmt::Debug>( embeddings: &[T], shape: &[usize], stride: &[usize], ids: &[I], dim: usize, name: &'static str, ) -> Vec<T> { let device = Device::system_default().expect("no device found"); let command_queue = device.new_command_queue(); let command_buffer = command_queue.new_command_buffer(); let embeddings_buffer = new_buffer(&device, embeddings); let ids_buffer = new_buffer(&device, ids); let left_size: usize = shape[..dim].iter().product(); let right_size: usize = shape[dim + 1..].iter().product(); let dst_el = ids.len() * left_size * right_size; let dst_buffer = new_buffer(&device, &vec![0.0f32; dst_el]); let kernels = Kernels::new(); call_index_select( &device, command_buffer, &kernels, name, shape, ids.len(), dim, false, shape, stride, BufferOffset::zero_offset(&embeddings_buffer), BufferOffset::zero_offset(&ids_buffer), &dst_buffer, ) .unwrap(); command_buffer.commit(); command_buffer.wait_until_completed(); read_to_vec(&dst_buffer, dst_el) } #[test] fn cos_f16() { let v: Vec<f16> = [1.0f32, 2.0, 3.0] .iter() .map(|v| f16::from_f32(*v)) .collect(); let results = run(&v, unary::contiguous::cos::HALF); let expected: Vec<f16> 
= v.iter().map(|v| f16::from_f32(v.to_f32().cos())).collect(); assert_eq!(approx_f16(results, 2), vec![0.54, -0.42, -0.99]); assert_eq!(approx_f16(expected, 2), vec![0.54, -0.42, -0.99]); } fn run_reduce<T: Clone>(v: &[T], out_length: usize, name: &'static str) -> Vec<T> { let device = device(); let kernels = Kernels::new(); let command_queue = device.new_command_queue(); let command_buffer = command_queue.new_command_buffer(); let input = new_buffer(&device, v); let options = MTLResourceOptions::StorageModeManaged; let output = device.new_buffer((out_length * core::mem::size_of::<T>()) as u64, options); let dims = vec![v.len()]; let strides = vec![1]; call_reduce_strided( &device, command_buffer, &kernels, name, &dims, &strides, out_length, BufferOffset::zero_offset(&input), &output, ) .unwrap(); command_buffer.commit(); command_buffer.wait_until_completed(); read_to_vec(&output, out_length) } fn run_softmax<T: Clone + std::fmt::Debug>(v: &[T], last_dim: usize, name: &'static str) -> Vec<T> { let device = device(); let kernels = Kernels::new(); let command_queue = device.new_command_queue(); let command_buffer = command_queue.new_command_buffer(); let input = new_buffer(&device, v); let output = new_buffer(&device, v); call_last_softmax( &device, command_buffer, &kernels, name, v.len(), last_dim, &input, 0, &output, ) .unwrap(); command_buffer.commit(); command_buffer.wait_until_completed(); read_to_vec(&output, v.len()) } #[test] fn reduce_sum() { let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0]; let out_length = 1; let results = run_reduce(&v, out_length, "fast_sum_f32_strided"); assert_eq!(approx(results, 4), vec![21.0]); } #[test] fn reduce_sum2() { let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0]; let out_length = 2; let results = run_reduce(&v, out_length, "fast_sum_f32_strided"); assert_eq!(approx(results, 4), vec![6.0, 15.0]); } #[test] fn softmax() { let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0]; let last_dim = 6; let results = run_softmax(&v, last_dim, "softmax_f32"); assert_eq!( approx(results, 4), vec![0.0043, 0.0116, 0.0315, 0.0858, 0.2331, 0.6337] ); let last_dim = 4096; let n = 200; let mut v = vec![0.0; n * last_dim]; for i in 0..n { v[i * last_dim] = 20.0; } let results = run_softmax(&v, last_dim, "softmax_f32"); let results = approx(results, 4); assert_eq!( results.iter().map(|&s| s.round() as usize).sum::<usize>(), n ); assert_eq!(results[0], 1.0); assert_eq!(results[1], 0.0); assert_eq!(results[last_dim], 1.0); assert_eq!(results[2 * last_dim], 1.0); let v = vec![0.0f32, 1.0, 2.0, 3.0, 4.0, 5.0]; let last_dim = 6; let results = run_softmax(&v, last_dim, "softmax_f32"); assert_eq!( approx(results, 4), vec![0.0043, 0.0116, 0.0315, 0.0858, 0.2331, 0.6337] ); let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0]; let last_dim = 3; let results = run_softmax(&v, last_dim, "softmax_f32"); assert_eq!( approx(results, 4), vec![0.0900, 0.2447, 0.6652, 0.0900, 0.2447, 0.6652] ); let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0] .iter() .map(|v| f16::from_f32(*v)) .collect::<Vec<_>>(); let last_dim = 6; let results = run_softmax(&v, last_dim, "softmax_f16"); assert_eq!( approx_f16(results, 4), vec![0.0043, 0.0116, 0.0316, 0.0858, 0.2332, 0.6338] ); let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0] .iter() .map(|v| bf16::from_f32(*v)) .collect::<Vec<_>>(); let last_dim = 6; let results = run_softmax(&v, last_dim, "softmax_bf16"); assert_eq!( approx_bf16(results, 4), vec![0.0043, 0.0116, 0.0315, 0.0859, 0.2324, 0.6328] ); } #[allow(clippy::too_many_arguments)] fn run_where_cond<I: Clone, T: Clone>( 
shape: &[usize], cond: &[I], (cond_stride, cond_offset): (Vec<usize>, usize), left_true: &[T], (left_stride, left_offset): (Vec<usize>, usize), right_false: &[T], (_right_stride, _right_offset): (Vec<usize>, usize), name: &'static str, ) -> Vec<T> { let device = device(); let kernels = Kernels::new(); let command_queue = device.new_command_queue(); let command_buffer = command_queue.new_command_buffer(); let options = MTLResourceOptions::StorageModeManaged; let length = cond.len(); let cond = device.new_buffer_with_data( cond.as_ptr() as *const core::ffi::c_void, std::mem::size_of_val(cond) as u64, options, ); let left = device.new_buffer_with_data( left_true.as_ptr() as *const core::ffi::c_void, (length * core::mem::size_of::<T>()) as u64, options, ); let right = device.new_buffer_with_data( right_false.as_ptr() as *const core::ffi::c_void, (length * core::mem::size_of::<T>()) as u64, options, ); let output = device.new_buffer((length * core::mem::size_of::<T>()) as u64, options); let cond = BufferOffset { buffer: &cond, offset_in_bytes: cond_offset, }; let left = BufferOffset { buffer: &left, offset_in_bytes: left_offset, }; let right = BufferOffset { buffer: &right, offset_in_bytes: cond_offset, }; call_where_cond_strided( &device, command_buffer, &kernels, name, shape, cond, &cond_stride, left, &left_stride, right, &cond_stride, &output, ) .unwrap(); command_buffer.commit(); command_buffer.wait_until_completed(); read_to_vec(&output, length) } #[test] fn where_cond() { let shape = vec![6]; let cond = vec![0u8, 1, 0, 0, 1, 1]; let cond_l = (vec![1], 0); let left_true = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0]; let left_l = (vec![1], 0); let right_false = vec![-1.0f32, -2.0, -3.0, -4.0, -5.0, -6.0]; let right_l = (vec![1], 0); let results = run_where_cond( &shape, &cond, cond_l, &left_true, left_l, &right_false, right_l, "where_u8_f32", ); assert_eq!(approx(results, 4), vec![-1.0f32, 2.0, -3.0, -4.0, 5.0, 6.0]); } #[test] fn where_cond_u32_f32() { let shape = vec![6]; let cond = vec![0u32, 1, 0, 0, 1, 1]; let cond_l = (vec![1], 0); let left_true = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0]; let left_l = (vec![1], 0); let right_false = vec![-1.0f32, -2.0, -3.0, -4.0, -5.0, -6.0]; let right_l = (vec![1], 0); let results = run_where_cond( &shape, &cond, cond_l, &left_true, left_l, &right_false, right_l, "where_u32_f32", ); assert_eq!(approx(results, 4), vec![-1.0f32, 2.0, -3.0, -4.0, 5.0, 6.0]); } fn run_gemm<T: Clone>( name: &'static str, (b, m, n, k): (usize, usize, usize, usize), lhs: &[T], lhs_stride: Vec<usize>, lhs_offset: usize, rhs: &[T], rhs_stride: Vec<usize>, rhs_offset: usize, ) -> Vec<T> { let device = device(); let kernels = Kernels::new(); let command_queue = device.new_command_queue(); let command_buffer = command_queue.new_command_buffer(); let options = MTLResourceOptions::StorageModeManaged; let lhs = device.new_buffer_with_data( lhs.as_ptr() as *const core::ffi::c_void, std::mem::size_of_val(lhs) as u64, options, ); let rhs = device.new_buffer_with_data( rhs.as_ptr() as *const core::ffi::c_void, std::mem::size_of_val(rhs) as u64, options, ); let length = b * m * n; let output = device.new_buffer((length * core::mem::size_of::<T>()) as u64, options); call_gemm( &device, command_buffer, &kernels, name, (b, m, n, k), &lhs_stride, lhs_offset, &lhs, &rhs_stride, rhs_offset, &rhs, &output, ) .unwrap(); command_buffer.commit(); command_buffer.wait_until_completed(); read_to_vec(&output, length) } #[test] fn gemm() { let (b, m, n, k) = (1, 2, 4, 3); let lhs_stride = vec![m * k, k, 1]; 
let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect(); let rhs_stride = vec![n * k, n, 1]; let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect(); let results = run_gemm( "sgemm", (b, m, n, k), &lhs, lhs_stride, 0, &rhs, rhs_stride, 0, ); assert_eq!( approx(results, 4), vec![20.0, 23.0, 26.0, 29.0, 56.0, 68.0, 80.0, 92.0] ); let (b, m, n, k) = (2, 2, 4, 3); let lhs_stride = vec![m * k, k, 1]; let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect(); let rhs_stride = vec![n * k, n, 1]; let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect(); let results = run_gemm( "sgemm", (b, m, n, k), &lhs, lhs_stride, 0, &rhs, rhs_stride, 0, ); assert_eq!( approx(results, 4), vec![ 20.0, 23.0, 26.0, 29.0, 56.0, 68.0, 80.0, 92.0, 344.0, 365.0, 386.0, 407.0, 488.0, 518.0, 548.0, 578.0 ] ); // OFFSET let (b, m, n, k) = (2, 2, 4, 3); let lhs_stride = vec![m * k, k, 1]; let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect(); let rhs_stride = vec![n * k, n, 1]; let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect(); // Manually set batch_size=1 and offset 12 elements * 4 the number of bytes for f32 let results = run_gemm( "sgemm", (1, m, n, k), &lhs, lhs_stride, 0, &rhs, rhs_stride, 12 * 4, ); assert_eq!( approx(results, 4), vec![56.0, 59.0, 62.0, 65.0, 200.0, 212.0, 224.0, 236.0] ); // bgemm sanity test if false { let (b, m, n, k) = (1, 2, 4, 3); let lhs_stride = vec![m * k, k, 1]; let lhs: Vec<bf16> = (0..b * m * k).map(|f| bf16::from_f32(f as f32)).collect(); let rhs_stride = vec![n * k, n, 1]; let rhs: Vec<bf16> = (0..b * n * k).map(|f| bf16::from_f32(f as f32)).collect(); let results = run_gemm( "bgemm", (b, m, n, k), &lhs, lhs_stride, 0, &rhs, rhs_stride, 0, ); assert_eq!( approx_bf16(results, 4), vec![20.0, 23.0, 26.0, 29.0, 56.0, 68.0, 80.0, 92.0] ); } // hgemm sanity test let (b, m, n, k) = (1, 2, 4, 3); let lhs_stride = vec![m * k, k, 1]; let lhs: Vec<f16> = (0..b * m * k).map(|f| f16::from_f32(f as f32)).collect(); let rhs_stride = vec![n * k, n, 1]; let rhs: Vec<f16> = (0..b * n * k).map(|f| f16::from_f32(f as f32)).collect(); let results = run_gemm( "hgemm", (b, m, n, k), &lhs, lhs_stride, 0, &rhs, rhs_stride, 0, ); assert_eq!( approx_f16(results, 4), vec![20.0, 23.0, 26.0, 29.0, 56.0, 68.0, 80.0, 92.0] ); } fn run_random<T: Clone>(name: &'static str, seed: u32, length: usize, a: f32, b: f32) -> Vec<T> { let device = device(); let kernels = Kernels::new(); let command_queue = device.new_command_queue(); let command_buffer = command_queue.new_command_buffer(); let options = MTLResourceOptions::StorageModeManaged; let output = device.new_buffer((length * core::mem::size_of::<T>()) as NSUInteger, options); let seed = device.new_buffer_with_data( &seed as *const u32 as *const core::ffi::c_void, std::mem::size_of::<u32>() as NSUInteger, options, ); if name.starts_with("rand_uniform") { call_random_uniform( &device, command_buffer, &kernels, name, a, b, length, &seed, &output, ) .unwrap(); } else { call_random_normal( &device, command_buffer, &kernels, name, a, b, length, &seed, &output, ) .unwrap(); } command_buffer.commit(); command_buffer.wait_until_completed(); read_to_vec(&output, length) } #[test] fn random() { fn calc_mean(data: &[f32]) -> f32 { let sum = data.iter().sum::<f32>(); let count = data.len(); assert!(count > 0); sum / count as f32 } fn calc_stddev(data: &[f32]) -> f32 { let mean = calc_mean(data); let count = data.len(); assert!(count > 0); let variance = data .iter() .map(|value| { let diff = mean - *value; diff * diff }) .sum::<f32>() / 
count as f32; variance.sqrt() } let shape = vec![1024, 10]; let length = shape.iter().product::<usize>(); let seed = 299792458; let min = -30.0; let max = 30.0; let mean = 100.0; let stddev = 50.0; macro_rules! validate_random { ($type:ty) => { let results: Vec<f32> = run_random::<$type>( concat!("rand_uniform_", stringify!($type)), seed, length, min, max, ) .into_iter() .map(f32::from) .collect(); results.iter().for_each(|v| { assert!(*v >= min && *v <= max); }); assert!(calc_mean(&results) > -1.0 && calc_mean(&results) < 1.0); let results: Vec<f32> = run_random::<$type>( concat!("rand_normal_", stringify!($type)), seed, length, mean, stddev, ) .into_iter() .map(f32::from) .collect(); assert!((calc_mean(&results) - mean).abs() < mean / 10.0); assert!((calc_stddev(&results) - stddev).abs() < stddev / 10.0); }; } validate_random!(f32); validate_random!(f16); validate_random!(bf16); } fn run_scatter_add<T: Clone, I: Clone + std::fmt::Debug>( input: &[T], ids: &[I], shape: &[usize], dim: usize, name: &'static str, ) -> Vec<T> { let device = device(); let kernels = Kernels::new(); let command_queue = device.new_command_queue(); let command_buffer = command_queue.new_command_buffer(); let options = MTLResourceOptions::StorageModeManaged; let input_buffer = new_buffer(&device, input); let ids_buffer = new_buffer(&device, ids); let output = device.new_buffer(std::mem::size_of_val(input) as u64, options); call_scatter_add( &device, command_buffer, &kernels, name, shape, shape, dim, BufferOffset::zero_offset(&input_buffer), BufferOffset::zero_offset(&ids_buffer), &output, ) .unwrap(); command_buffer.commit(); command_buffer.wait_until_completed(); read_to_vec(&output, input.len()) } #[test] fn scatter_add() { let ids_u8 = [0u8, 0, 1, 0, 2, 2, 3, 3]; let ids_u32 = [0u32, 0, 1, 0, 2, 2, 3, 3]; let ids_i64 = [0i64, 0, 1, 0, 2, 2, 3, 3]; let input_f32 = [5.0f32, 1.0, 7.0, 2.0, 3.0, 2.0, 1.0, 3.0]; let input_f16 = input_f32 .iter() .map(|v| f16::from_f32(*v)) .collect::<Vec<_>>(); let input_bf16 = input_f32 .iter() .map(|v| bf16::from_f32(*v)) .collect::<Vec<_>>(); let output_dim1_f32 = vec![8.0, 7.0, 5.0, 4.0, 0.0, 0.0, 0.0, 0.0]; let output_dim1_f16 = output_dim1_f32 .iter() .map(|v| f16::from_f32(*v)) .collect::<Vec<_>>(); let output_dim1_bf16 = output_dim1_f32 .iter() .map(|v| bf16::from_f32(*v)) .collect::<Vec<_>>(); let output_dim2_f32 = vec![5.0, 3.0, 7.0, 0.0, 3.0, 2.0, 1.0, 3.0]; let output_dim2_f16 = output_dim2_f32 .iter() .map(|v| f16::from_f32(*v)) .collect::<Vec<_>>(); let output_dim2_bf16 = output_dim2_f32 .iter() .map(|v| bf16::from_f32(*v)) .collect::<Vec<_>>(); for (shape, output_f32, output_f16, output_bf16) in [ (vec![8], output_dim1_f32, output_dim1_f16, output_dim1_bf16), ( vec![4, 2], output_dim2_f32, output_dim2_f16, output_dim2_bf16, ), ] { for results in [ run_scatter_add(&input_f32, &ids_u8, &shape, 0, "sa_u8_f32"), run_scatter_add(&input_f32, &ids_u32, &shape, 0, "sa_u32_f32"), run_scatter_add(&input_f32, &ids_i64, &shape, 0, "sa_i64_f32"), ] { assert_eq!(results, output_f32); } for results in [ run_scatter_add(&input_f16, &ids_u8, &shape, 0, "sa_u8_f16"), run_scatter_add(&input_f16, &ids_u32, &shape, 0, "sa_u32_f16"), run_scatter_add(&input_f16, &ids_i64, &shape, 0, "sa_i64_f16"), ] { assert_eq!(results, output_f16); } for results in [ run_scatter_add(&input_bf16, &ids_u8, &shape, 0, "sa_u8_bf16"), run_scatter_add(&input_bf16, &ids_u32, &shape, 0, "sa_u32_bf16"), run_scatter_add(&input_bf16, &ids_i64, &shape, 0, "sa_i64_bf16"), ] { assert_eq!(results, output_bf16); } } } fn 
run_index_add<T: Clone, I: Clone + std::fmt::Debug>( left: &[T], right: &[T], indices: &[I], shape: &[usize], dim: usize, name: &'static str, ) -> Vec<T> { let device = device(); let kernels = Kernels::new(); let command_queue = device.new_command_queue(); let command_buffer = command_queue.new_command_buffer(); let input_buffer = new_buffer(&device, right); let output = new_buffer(&device, left); let indices_buffer = new_buffer(&device, indices); call_index_add( &device, command_buffer, &kernels, name, shape, shape, shape, dim, BufferOffset::zero_offset(&input_buffer), BufferOffset::zero_offset(&indices_buffer), &output, ) .unwrap(); command_buffer.commit(); command_buffer.wait_until_completed(); read_to_vec(&output, left.len()) } #[test] fn index_add() { let left = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0]; let right = vec![1.0f32, 1.0, 1.0, 1.0, 1.0, 1.0]; let indices = vec![0u32, 1, 0, 1, 0, 1]; let shape = vec![6]; // u32, f32 { let results = run_index_add(&left, &right, &indices, &shape, 0, "ia_u32_f32"); assert_eq!(results, vec![4.0, 5.0, 3.0, 4.0, 5.0, 6.0]); } // u32, f16 { let left = left.iter().map(|v| f16::from_f32(*v)).collect::<Vec<_>>(); let right = right.iter().map(|v| f16::from_f32(*v)).collect::<Vec<_>>(); let results = run_index_add(&left, &right, &indices, &shape, 0, "ia_u32_f16"); assert_eq!(approx_f16(results, 4), vec![4.0, 5.0, 3.0, 4.0, 5.0, 6.0]); } // u32, bf16 { let left = left.iter().map(|v| bf16::from_f32(*v)).collect::<Vec<_>>(); let right = right.iter().map(|v| bf16::from_f32(*v)).collect::<Vec<_>>(); let results = run_index_add(&left, &right, &indices, &shape, 0, "ia_u32_bf16"); assert_eq!(approx_bf16(results, 4), vec![4.0, 5.0, 3.0, 4.0, 5.0, 6.0]); } // u8, f32 { let indices = indices.iter().map(|v| *v as u8).collect::<Vec<_>>(); let results = run_index_add(&left, &right, &indices, &shape, 0, "ia_u8_f32"); assert_eq!(results, vec![4.0, 5.0, 3.0, 4.0, 5.0, 6.0]); } // u8, f16 { let indices = indices.iter().map(|v| *v as u8).collect::<Vec<_>>(); let left = left.iter().map(|v| f16::from_f32(*v)).collect::<Vec<_>>(); let right = right.iter().map(|v| f16::from_f32(*v)).collect::<Vec<_>>(); let results = run_index_add(&left, &right, &indices, &shape, 0, "ia_u8_f16"); assert_eq!(approx_f16(results, 4), vec![4.0, 5.0, 3.0, 4.0, 5.0, 6.0]); } // u8, bf16 { let indices = indices.iter().map(|v| *v as u8).collect::<Vec<_>>(); let left = left.iter().map(|v| bf16::from_f32(*v)).collect::<Vec<_>>(); let right = right.iter().map(|v| bf16::from_f32(*v)).collect::<Vec<_>>(); let results = run_index_add(&left, &right, &indices, &shape, 0, "ia_u8_bf16"); assert_eq!(approx_bf16(results, 4), vec![4.0, 5.0, 3.0, 4.0, 5.0, 6.0]); } // i64, f32 { let indices = indices.iter().map(|v| *v as i64).collect::<Vec<_>>(); let results = run_index_add(&left, &right, &indices, &shape, 0, "ia_i64_f32"); assert_eq!(results, vec![4.0, 5.0, 3.0, 4.0, 5.0, 6.0]); } // i64, f16 { let indices = indices.iter().map(|v| *v as i64).collect::<Vec<_>>(); let left = left.iter().map(|v| f16::from_f32(*v)).collect::<Vec<_>>(); let right = right.iter().map(|v| f16::from_f32(*v)).collect::<Vec<_>>(); let results = run_index_add(&left, &right, &indices, &shape, 0, "ia_i64_f16"); assert_eq!(approx_f16(results, 4), vec![4.0, 5.0, 3.0, 4.0, 5.0, 6.0]); } // i64, bf16 { let indices = indices.iter().map(|v| *v as i64).collect::<Vec<_>>(); let left = left.iter().map(|v| bf16::from_f32(*v)).collect::<Vec<_>>(); let right = right.iter().map(|v| bf16::from_f32(*v)).collect::<Vec<_>>(); let results = run_index_add(&left, 
&right, &indices, &shape, 0, "ia_i64_bf16"); assert_eq!(approx_bf16(results, 4), vec![4.0, 5.0, 3.0, 4.0, 5.0, 6.0]); } } fn run_pool2d<T: Clone>( v: &[T], (w_k, h_k): (usize, usize), (w_stride, h_stride): (usize, usize), shape: &[usize], strides: &[usize], name: &'static str, ) -> Vec<T> { let device = device(); let command_queue = device.new_command_queue(); let command_buffer = command_queue.new_command_buffer(); let out_w = (shape[2] - w_k) / w_stride + 1; let out_h = (shape[3] - h_k) / h_stride + 1; let dst_el = out_w * out_h * shape[0] * shape[1]; let input = new_buffer(&device, v); let output = new_buffer(&device, &vec![0.0f32; dst_el]); let kernels = Kernels::new(); call_pool2d( &device, command_buffer, &kernels, name, shape, strides, out_w, out_h, w_k, h_k, w_stride, h_stride, &input, &output, ) .unwrap(); command_buffer.commit(); command_buffer.wait_until_completed(); read_to_vec(&output, dst_el) } #[test] fn max_pool2d_f32() { // kernel 2 stride 1 let v: Vec<f32> = (0..16).map(|v| v as f32).collect(); let shape = vec![1, 1, 4, 4]; let strides = vec![16, 16, 4, 1]; let kernel = 2; let stride = 1; let results = run_pool2d( &v, (kernel, kernel), (stride, stride), &shape, &strides, "max_pool2d_f32", ); let expected = vec![5.0, 6.0, 7.0, 9.0, 10.0, 11.0, 13.0, 14.0, 15.0]; assert_eq!(results, expected); // kernel 2 stride 2 let v: Vec<f32> = (0..16).map(|v| v as f32).collect(); let shape = vec![1, 1, 4, 4]; let strides = vec![16, 16, 4, 1]; let kernel = 2; let stride = 2; let results = run_pool2d( &v, (kernel, kernel), (stride, stride), &shape, &strides, "max_pool2d_f32", ); let expected = vec![5.0, 7.0, 13.0, 15.0]; assert_eq!(results, expected); } #[test] fn max_pool2d_f16() { // kernel 2 stride 1 let v: Vec<half::f16> = (0..16).map(|v| half::f16::from_f32(v as f32)).collect(); let shape = vec![1, 1, 4, 4]; let strides = vec![16, 16, 4, 1]; let kernel = 2; let stride = 1; let results = run_pool2d( &v, (kernel, kernel), (stride, stride), &shape, &strides, "max_pool2d_f16", ); let expected = vec![5.0, 6.0, 7.0, 9.0, 10.0, 11.0, 13.0, 14.0, 15.0] .iter() .map(|v| half::f16::from_f32(*v)) .collect::<Vec<_>>(); assert_eq!(results, expected); // kernel 2 stride 2 let v: Vec<half::f16> = (0..16).map(|v| half::f16::from_f32(v as f32)).collect(); let shape = vec![1, 1, 4, 4]; let strides = vec![16, 16, 4, 1]; let kernel = 2; let stride = 2; let results = run_pool2d( &v, (kernel, kernel), (stride, stride), &shape, &strides, "max_pool2d_f16", ); let expected = vec![5.0, 7.0, 13.0, 15.0] .iter() .map(|v| half::f16::from_f32(*v)) .collect::<Vec<_>>(); assert_eq!(results, expected); } #[test] fn max_pool2d_bf16() { // kernel 2 stride 1 let v: Vec<half::bf16> = (0..16).map(|v| half::bf16::from_f32(v as f32)).collect(); let shape = vec![1, 1, 4, 4]; let strides = vec![16, 16, 4, 1]; let kernel = 2; let stride = 1; let results = run_pool2d( &v, (kernel, kernel), (stride, stride), &shape, &strides, "max_pool2d_bf16", ); let expected = vec![5.0, 6.0, 7.0, 9.0, 10.0, 11.0, 13.0, 14.0, 15.0] .iter() .map(|v| half::bf16::from_f32(*v)) .collect::<Vec<_>>(); assert_eq!(results, expected); // kernel 2 stride 2 let v: Vec<half::bf16> = (0..16).map(|v| half::bf16::from_f32(v as f32)).collect(); let shape = vec![1, 1, 4, 4]; let strides = vec![16, 16, 4, 1]; let kernel = 2; let stride = 2; let results = run_pool2d( &v, (kernel, kernel), (stride, stride), &shape, &strides, "max_pool2d_bf16", ); let expected = vec![5.0, 7.0, 13.0, 15.0] .iter() .map(|v| half::bf16::from_f32(*v)) .collect::<Vec<_>>(); 
assert_eq!(results, expected); } #[test] fn max_pool2d_u8() { // kernel 2 stride 1 let v: Vec<u8> = (0..16).map(|v| v as u8).collect(); let shape = vec![1, 1, 4, 4]; let strides = vec![16, 16, 4, 1]; let kernel = 2; let stride = 1; let results = run_pool2d( &v, (kernel, kernel), (stride, stride), &shape, &strides, "max_pool2d_u8", ); let expected = vec![5, 6, 7, 9, 10, 11, 13, 14, 15]; assert_eq!(results, expected); // kernel 2 stride 2 let v: Vec<u8> = (0..16).map(|v| v as u8).collect(); let shape = vec![1, 1, 4, 4]; let strides = vec![16, 16, 4, 1]; let kernel = 2; let stride = 2; let results = run_pool2d( &v, (kernel, kernel), (stride, stride), &shape, &strides, "max_pool2d_u8", ); let expected = vec![5, 7, 13, 15]; assert_eq!(results, expected); } #[test] fn max_pool2d_u32() { // kernel 2 stride 1 let v: Vec<u32> = (0..16).map(|v| v as u32).collect(); let shape = vec![1, 1, 4, 4]; let strides = vec![16, 16, 4, 1]; let kernel = 2; let stride = 1; let results = run_pool2d( &v, (kernel, kernel), (stride, stride), &shape, &strides, "max_pool2d_u32", ); let expected = vec![5, 6, 7, 9, 10, 11, 13, 14, 15]; assert_eq!(results, expected); // kernel 2 stride 2 let v: Vec<u32> = (0..16).map(|v| v as u32).collect(); let shape = vec![1, 1, 4, 4]; let strides = vec![16, 16, 4, 1]; let kernel = 2; let stride = 2; let results = run_pool2d( &v, (kernel, kernel), (stride, stride), &shape, &strides, "max_pool2d_u32", ); let expected = vec![5, 7, 13, 15]; assert_eq!(results, expected); } #[test] fn avg_pool2d_f32() { // kernel 2 stride 1 let v: Vec<f32> = (0..16).map(|v| v as f32).collect(); let shape = vec![1, 1, 4, 4]; let strides = vec![16, 16, 4, 1]; let kernel = 2; let stride = 1; let results = run_pool2d( &v, (kernel, kernel), (stride, stride), &shape, &strides, "avg_pool2d_f32", ); let expected = vec![ 2.5000, 3.5000, 4.5000, 6.5000, 7.5000, 8.5000, 10.5000, 11.5000, 12.5000, ]; assert_eq!(results, expected); } #[test] fn avg_pool2d_f16() { // kernel 2 stride 1 let v: Vec<f16> = (0..16).map(|v| f16::from_f32(v as f32)).collect(); let shape = vec![1, 1, 4, 4]; let strides = vec![16, 16, 4, 1]; let kernel = 2; let stride = 1; let results = run_pool2d( &v, (kernel, kernel), (stride, stride), &shape, &strides, "avg_pool2d_f16", ); let expected = vec![ 2.5000, 3.5000, 4.5000, 6.5000, 7.5000, 8.5000, 10.5000, 11.5000, 12.5000, ] .iter() .map(|v| f16::from_f32(*v)) .collect::<Vec<_>>(); assert_eq!(results, expected); } #[test] fn avg_pool2d_bf16() { // kernel 2 stride 1 let v: Vec<bf16> = (0..16).map(|v| bf16::from_f32(v as f32)).collect(); let shape = vec![1, 1, 4, 4]; let strides = vec![16, 16, 4, 1]; let kernel = 2; let stride = 1; let results = run_pool2d( &v, (kernel, kernel), (stride, stride), &shape, &strides, "avg_pool2d_bf16", ); let expected = vec![ 2.5000, 3.5000, 4.5000, 6.5000, 7.5000, 8.5000, 10.5000, 11.5000, 12.5000, ] .iter() .map(|v| bf16::from_f32(*v)) .collect::<Vec<_>>(); assert_eq!(results, expected); } #[test] fn avg_pool2d_u8() { // kernel 2 stride 1 let v: Vec<u8> = (0..16).map(|v| v as u8).collect(); let shape = vec![1, 1, 4, 4]; let strides = vec![16, 16, 4, 1]; let kernel = 2; let stride = 1; let results = run_pool2d( &v, (kernel, kernel), (stride, stride), &shape, &strides, "avg_pool2d_u8", ); let expected = vec![2, 3, 4, 6, 7, 8, 10, 11, 12]; assert_eq!(results, expected); } #[test] fn avg_pool2d_u32() { // kernel 2 stride 1 let v: Vec<u32> = (0..16).map(|v| v as u32).collect(); let shape = vec![1, 1, 4, 4]; let strides = vec![16, 16, 4, 1]; let kernel = 2; let stride = 1; 
let results = run_pool2d( &v, (kernel, kernel), (stride, stride), &shape, &strides, "avg_pool2d_u32", ); let expected = vec![2, 3, 4, 6, 7, 8, 10, 11, 12]; assert_eq!(results, expected); } #[allow(clippy::too_many_arguments)] fn run_conv_transpose1d<T: Clone>( input: &[T], input_shape: &[usize], input_stride: &[usize], kernel: &[T], kernel_shape: &[usize], kernel_stride: &[usize], dilation: usize, stride: usize, padding: usize, out_padding: usize, name: &'static str, ) -> Vec<T> { let device = device(); let command_queue = device.new_command_queue(); let command_buffer = command_queue.new_command_buffer(); let c_out = kernel_shape[1]; let k_size = kernel_shape[2]; let b_size = input_shape[0]; let l_in = input_shape[2]; let l_out = (l_in - 1) * stride - 2 * padding + dilation * (k_size - 1) + out_padding + 1; let dst_el = c_out * l_out * b_size; let input = new_buffer(&device, input); let kernel = new_buffer(&device, kernel); let output = new_buffer(&device, &vec![0.0f32; dst_el]); let kernels = Kernels::new(); call_conv_transpose1d( &device, command_buffer, &kernels, name, dilation, stride, padding, out_padding, c_out, l_out, b_size, input_shape, input_stride, kernel_shape, kernel_stride, &input, 0, &kernel, 0, &output, ) .unwrap(); command_buffer.commit(); command_buffer.wait_until_completed(); read_to_vec(&output, dst_el) } #[test] fn conv_transpose1d_f32() { let input = vec![1.0f32, 2.0, 3.0, 4.0]; let input_shape = &[1, 1, 4]; let input_stride = &[4, 4, 1]; let kernel = vec![1.0f32, 2.0, 3.0, 4.0]; let kernel_shape = &[1, 1, 4]; let kernel_stride = &[4, 4, 1]; let results = run_conv_transpose1d( &input, input_shape, input_stride, &kernel, kernel_shape, kernel_stride, 1, 1, 0, 0, "conv_transpose1d_f32", ); let expected = vec![1., 4., 10., 20., 25., 24., 16.]; assert_eq!(results, expected); } #[test] fn conv_transpose1d_f16() { let input: Vec<f16> = vec![1.0, 2.0, 3.0, 4.0] .iter() .map(|v| f16::from_f32(*v)) .collect(); let input_shape = &[1, 1, 4]; let input_stride = &[4, 4, 1]; let kernel: Vec<f16> = vec![1.0, 2.0, 3.0, 4.0] .iter() .map(|v| f16::from_f32(*v)) .collect(); let kernel_shape = &[1, 1, 4]; let kernel_stride = &[4, 4, 1]; let results = run_conv_transpose1d( &input, input_shape, input_stride, &kernel, kernel_shape, kernel_stride, 1, 1, 0, 0, "conv_transpose1d_f16", ); let expected = vec![1., 4., 10., 20., 25., 24., 16.] .iter() .map(|v| f16::from_f32(*v)) .collect::<Vec<_>>(); assert_eq!(results, expected); } #[test] fn conv_transpose1d_bf16() { let input: Vec<bf16> = vec![1.0, 2.0, 3.0, 4.0] .iter() .map(|v| bf16::from_f32(*v)) .collect(); let input_shape = &[1, 1, 4]; let input_stride = &[4, 4, 1]; let kernel: Vec<bf16> = vec![1.0, 2.0, 3.0, 4.0] .iter() .map(|v| bf16::from_f32(*v)) .collect(); let kernel_shape = &[1, 1, 4]; let kernel_stride = &[4, 4, 1]; let results = run_conv_transpose1d( &input, input_shape, input_stride, &kernel, kernel_shape, kernel_stride, 1, 1, 0, 0, "conv_transpose1d_bf16", ); let expected = vec![1., 4., 10., 20., 25., 24., 16.] 
.iter() .map(|v| bf16::from_f32(*v)) .collect::<Vec<_>>(); assert_eq!(results, expected); } #[test] fn conv_transpose1d_u8() { let input: Vec<u8> = vec![1, 2, 3, 4]; let input_shape = &[1, 1, 4]; let input_stride = &[4, 4, 1]; let kernel: Vec<u8> = vec![1, 2, 3, 4]; let kernel_shape = &[1, 1, 4]; let kernel_stride = &[4, 4, 1]; let results = run_conv_transpose1d( &input, input_shape, input_stride, &kernel, kernel_shape, kernel_stride, 1, 1, 0, 0, "conv_transpose1d_u8", ); let expected = vec![1, 4, 10, 20, 25, 24, 16]; assert_eq!(results, expected); } #[test] fn conv_transpose1d_u32() { let input: Vec<u32> = vec![1, 2, 3, 4]; let input_shape = &[1, 1, 4]; let input_stride = &[4, 4, 1]; let kernel: Vec<u32> = vec![1, 2, 3, 4]; let kernel_shape = &[1, 1, 4]; let kernel_stride = &[4, 4, 1]; let results = run_conv_transpose1d( &input, input_shape, input_stride, &kernel, kernel_shape, kernel_stride, 1, 1, 0, 0, "conv_transpose1d_u32", ); let expected = vec![1, 4, 10, 20, 25, 24, 16]; assert_eq!(results, expected); }
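// The assertions above rely on `approx`, `approx_f16`, and `approx_bf16` helpers defined
// earlier in this file. A minimal sketch of the rounding they perform (to `digits` decimal
// places, so float comparisons stay stable across devices) -- this is an illustrative
// reconstruction under that assumption, not necessarily the exact upstream code:
fn approx_sketch(v: Vec<f32>, digits: i32) -> Vec<f32> {
    let b = 10f32.powi(digits);
    // Round each element to the requested number of decimal places.
    v.iter().map(|t| f32::round(t * b) / b).collect()
}

fn approx_f16_sketch(v: Vec<f16>, digits: i32) -> Vec<f32> {
    let b = 10f32.powi(digits);
    // Widen to f32 first, then round as above.
    v.iter().map(|t| f32::round(t.to_f32() * b) / b).collect()
}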
candle/candle-metal-kernels/src/tests.rs/0
{ "file_path": "candle/candle-metal-kernels/src/tests.rs", "repo_id": "candle", "token_count": 31494 }
37
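// Note on the unary tests above: `run(&v, unary::contiguous::cos::HALF)` uses a plain `run`
// helper defined earlier in tests.rs. Its shape mirrors the `run_binary`/`run_cast` helpers
// shown above; this is a hedged sketch assuming the `call_unary_contiguous` entry point that
// candle-metal-kernels exposes for contiguous unary ops:
fn run_sketch<T: Clone>(v: &[T], name: unary::contiguous::Kernel) -> Vec<T> {
    let device = device();
    let kernels = Kernels::new();
    let command_queue = device.new_command_queue();
    let command_buffer = command_queue.new_command_buffer();
    let input = new_buffer(&device, v);
    let output = new_buffer(&device, v);
    call_unary_contiguous(
        &device,
        command_buffer,
        &kernels,
        name,
        v.len(),
        BufferOffset::zero_offset(&input),
        &output,
    )
    .unwrap();
    command_buffer.commit();
    command_buffer.wait_until_completed();
    read_to_vec(&output, v.len())
}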
//! Batch Normalization.
//!
//! This layer applies Batch Normalization over a mini-batch of inputs as described in [`Batch
//! Normalization`]. The input is expected to have at least two dimensions, with the features
//! on dimension 1.
//!
//! In training mode, `forward_train` normalizes with the batch statistics and updates the
//! running stats in place; in inference mode, `forward_eval` normalizes with the stored
//! running stats.
//!
//! [`Batch Normalization`]: https://arxiv.org/abs/1502.03167
use candle::{DType, Result, Tensor, Var};

#[derive(Debug, Clone, Copy, PartialEq)]
pub struct BatchNormConfig {
    pub eps: f64,
    pub remove_mean: bool,
    /// The meaning of affine here is different from LayerNorm: when false there are no learnable
    /// parameters at all, 1 is used for gamma and 0 for beta.
    pub affine: bool,
    /// Controls the exponential moving average of the running stats. Defaults to 0.1.
    ///
    /// `running_stat * (1.0 - momentum) + stat * momentum`.
    pub momentum: f64,
}

impl Default for BatchNormConfig {
    fn default() -> Self {
        Self {
            eps: 1e-5,
            remove_mean: true,
            affine: true,
            momentum: 0.1,
        }
    }
}

impl From<f64> for BatchNormConfig {
    fn from(eps: f64) -> Self {
        Self {
            eps,
            ..Default::default()
        }
    }
}

#[derive(Clone, Debug)]
pub struct BatchNorm {
    running_mean: Var,
    running_var: Var,
    weight_and_bias: Option<(Tensor, Tensor)>,
    remove_mean: bool,
    eps: f64,
    momentum: f64,
}

impl BatchNorm {
    fn check_validity(&self, num_features: usize) -> Result<()> {
        if self.eps < 0. {
            candle::bail!("batch-norm eps cannot be negative {}", self.eps)
        }
        if !(0.0..=1.0).contains(&self.momentum) {
            candle::bail!(
                "batch-norm momentum must be between 0 and 1, is {}",
                self.momentum
            )
        }
        if self.running_mean.dims() != [num_features] {
            candle::bail!(
                "batch-norm running mean has unexpected shape {:?} should have shape [{num_features}]",
                self.running_mean.shape(),
            )
        }
        if self.running_var.dims() != [num_features] {
            candle::bail!(
                "batch-norm running variance has unexpected shape {:?} should have shape [{num_features}]",
                self.running_var.shape(),
            )
        }
        if let Some((ref weight, ref bias)) = self.weight_and_bias.as_ref() {
            if weight.dims() != [num_features] {
                candle::bail!(
                    "batch-norm weight has unexpected shape {:?} should have shape [{num_features}]",
                    weight.shape(),
                )
            }
            if bias.dims() != [num_features] {
                candle::bail!(
                    "batch-norm bias has unexpected shape {:?} should have shape [{num_features}]",
                    bias.shape(),
                )
            }
        }
        Ok(())
    }

    pub fn new(
        num_features: usize,
        running_mean: Tensor,
        running_var: Tensor,
        weight: Tensor,
        bias: Tensor,
        eps: f64,
    ) -> Result<Self> {
        let out = Self {
            running_mean: Var::from_tensor(&running_mean)?,
            running_var: Var::from_tensor(&running_var)?,
            weight_and_bias: Some((weight, bias)),
            remove_mean: true,
            eps,
            momentum: 0.1,
        };
        out.check_validity(num_features)?;
        Ok(out)
    }

    pub fn new_no_bias(
        num_features: usize,
        running_mean: Tensor,
        running_var: Tensor,
        eps: f64,
    ) -> Result<Self> {
        let out = Self {
            running_mean: Var::from_tensor(&running_mean)?,
            running_var: Var::from_tensor(&running_var)?,
            weight_and_bias: None,
            remove_mean: true,
            eps,
            momentum: 0.1,
        };
        out.check_validity(num_features)?;
        Ok(out)
    }

    pub fn new_with_momentum(
        num_features: usize,
        running_mean: Tensor,
        running_var: Tensor,
        weight: Tensor,
        bias: Tensor,
        eps: f64,
        momentum: f64,
    ) -> Result<Self> {
        let out = Self {
            running_mean: Var::from_tensor(&running_mean)?,
            running_var: Var::from_tensor(&running_var)?,
            weight_and_bias: Some((weight, bias)),
            remove_mean: true,
            eps,
            momentum,
        };
        out.check_validity(num_features)?;
        Ok(out)
    }

    pub fn new_no_bias_with_momentum(
        num_features: usize,
        running_mean: Tensor,
        running_var: Tensor,
        eps: f64,
        momentum: f64,
    ) -> Result<Self> {
        let out = Self {
            running_mean: Var::from_tensor(&running_mean)?,
            running_var: Var::from_tensor(&running_var)?,
            weight_and_bias: None,
            remove_mean: true,
            eps,
            momentum,
        };
        out.check_validity(num_features)?;
        Ok(out)
    }

    pub fn running_mean(&self) -> &Tensor {
        self.running_mean.as_tensor()
    }

    pub fn running_var(&self) -> &Tensor {
        self.running_var.as_tensor()
    }

    pub fn eps(&self) -> f64 {
        self.eps
    }

    pub fn weight_and_bias(&self) -> Option<(&Tensor, &Tensor)> {
        self.weight_and_bias.as_ref().map(|v| (&v.0, &v.1))
    }

    pub fn momentum(&self) -> f64 {
        self.momentum
    }

    pub fn forward_train(&self, x: &Tensor) -> Result<Tensor> {
        let num_features = self.running_mean.as_tensor().dim(0)?;
        let x_dtype = x.dtype();
        let internal_dtype = match x_dtype {
            DType::F16 | DType::BF16 => DType::F32,
            d => d,
        };
        if x.rank() < 2 {
            candle::bail!(
                "batch-norm input tensor must have at least two dimensions ({:?})",
                x.shape()
            )
        }
        if x.dim(1)? != num_features {
            candle::bail!(
                "batch-norm input doesn't have the expected number of features ({:?} <> {})",
                x.shape(),
                num_features
            )
        }
        let x = x.to_dtype(internal_dtype)?;
        let x = x.transpose(0, 1)?;
        let x_dims_post_transpose = x.dims();
        // Flatten all the dimensions except the channel one as this performs a Spatial Batch
        // Normalization.
        let x = x.flatten_from(1)?.contiguous()?;
        let x = if self.remove_mean {
            // The mean is taken over dim 1 as this is the batch dim after the transpose(0, 1) above.
            let mean_x = x.mean_keepdim(1)?;
            let updated_running_mean = ((self.running_mean.as_tensor() * (1.0 - self.momentum))?
                + (mean_x.flatten_all()? * self.momentum)?)?;
            self.running_mean.set(&updated_running_mean)?;
            x.broadcast_sub(&mean_x)?
        } else {
            x
        };
        // The (biased) variance of the centered values, again over dim 1, i.e. the batch dim
        // after the transpose(0, 1) above.
        let norm_x = x.sqr()?.mean_keepdim(1)?;
        let updated_running_var = {
            let batch_size = x.dim(1)? as f64;
            let running_var_weight = 1.0 - self.momentum;
            let norm_x_weight = self.momentum * batch_size / (batch_size - 1.0);
            ((self.running_var.as_tensor() * running_var_weight)?
                + (&norm_x.flatten_all()? * norm_x_weight)?)?
        };
        self.running_var.set(&updated_running_var)?;
        let x = x
            .broadcast_div(&(norm_x + self.eps)?.sqrt()?)?
            .to_dtype(x_dtype)?;
        let x = match &self.weight_and_bias {
            None => x,
            Some((weight, bias)) => {
                let weight = weight.reshape(((), 1))?;
                let bias = bias.reshape(((), 1))?;
                x.broadcast_mul(&weight)?.broadcast_add(&bias)?
            }
        };
        x.reshape(x_dims_post_transpose)?.transpose(0, 1)
    }

    fn forward_eval(&self, x: &Tensor) -> Result<Tensor> {
        let target_shape: Vec<usize> = x
            .dims()
            .iter()
            .enumerate()
            .map(|(idx, v)| if idx == 1 { *v } else { 1 })
            .collect();
        let target_shape = target_shape.as_slice();
        let x = x
            .broadcast_sub(
                &self
                    .running_mean
                    .as_detached_tensor()
                    .reshape(target_shape)?,
            )?
            .broadcast_div(
                &(self
                    .running_var
                    .as_detached_tensor()
                    .reshape(target_shape)?
                    + self.eps)?
                    .sqrt()?,
            )?;
        match &self.weight_and_bias {
            None => Ok(x),
            Some((weight, bias)) => {
                let weight = weight.reshape(target_shape)?;
                let bias = bias.reshape(target_shape)?;
                x.broadcast_mul(&weight)?.broadcast_add(&bias)
            }
        }
    }
}

impl crate::ModuleT for BatchNorm {
    fn forward_t(&self, x: &Tensor, train: bool) -> Result<Tensor> {
        if train {
            self.forward_train(x)
        } else {
            self.forward_eval(x)
        }
    }
}

pub fn batch_norm<C: Into<BatchNormConfig>>(
    num_features: usize,
    config: C,
    vb: crate::VarBuilder,
) -> Result<BatchNorm> {
    use crate::Init;
    let config = config.into();
    if config.eps < 0. {
        candle::bail!("batch-norm eps cannot be negative {}", config.eps)
    }
    let running_mean = vb.get_with_hints(num_features, "running_mean", Init::Const(0.))?;
    let running_var = vb.get_with_hints(num_features, "running_var", Init::Const(1.))?;
    let weight_and_bias = if config.affine {
        let weight = vb.get_with_hints(num_features, "weight", Init::Const(1.))?;
        let bias = vb.get_with_hints(num_features, "bias", Init::Const(0.))?;
        Some((weight, bias))
    } else {
        None
    };
    Ok(BatchNorm {
        running_mean: Var::from_tensor(&running_mean)?,
        running_var: Var::from_tensor(&running_var)?,
        weight_and_bias,
        remove_mean: config.remove_mean,
        eps: config.eps,
        momentum: config.momentum,
    })
}
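// A minimal usage sketch for the module above (illustrative only: it assumes the standard
// candle `Tensor` constructors; shapes and values are made up). In eval mode the output is
// (x - running_mean) / sqrt(running_var + eps), scaled by the weight and shifted by the
// bias, broadcast over the feature dimension (dim 1):
fn batch_norm_eval_demo() -> Result<Tensor> {
    use candle::Device;
    let dev = Device::Cpu;
    let running_mean = Tensor::zeros(2, DType::F32, &dev)?;
    let running_var = Tensor::ones(2, DType::F32, &dev)?;
    let weight = Tensor::ones(2, DType::F32, &dev)?;
    let bias = Tensor::zeros(2, DType::F32, &dev)?;
    let bn = BatchNorm::new(2, running_mean, running_var, weight, bias, 1e-5)?;
    // Input layout: (batch, features, ...) with the features on dim 1.
    let x = Tensor::randn(0f32, 1f32, (4, 2, 8), &dev)?;
    // `train = false` routes to `forward_eval`, which uses the stored running stats.
    crate::ModuleT::forward_t(&bn, &x, false)
}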
candle/candle-nn/src/batch_norm.rs/0
{ "file_path": "candle/candle-nn/src/batch_norm.rs", "repo_id": "candle", "token_count": 5325 }
38
//! A sequential layer used to chain multiple layers and closures.
use candle::{Module, Result, Tensor};

/// A sequential layer combining multiple other layers.
pub struct Sequential {
    layers: Vec<Box<dyn Module>>,
}

/// Creates a new empty sequential layer.
pub fn seq() -> Sequential {
    Sequential { layers: vec![] }
}

impl Sequential {
    /// The number of sub-layers embedded in this layer.
    pub fn len(&self) -> i64 {
        self.layers.len() as i64
    }

    /// Returns true if this layer does not have any sub-layer.
    pub fn is_empty(&self) -> bool {
        self.layers.is_empty()
    }
}

impl Module for Sequential {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let mut xs = xs.clone();
        for layer in self.layers.iter() {
            xs = layer.forward(&xs)?
        }
        Ok(xs)
    }
}

impl Sequential {
    /// Appends a layer after all the current layers.
    #[allow(clippy::should_implement_trait)]
    pub fn add<M: Module + 'static>(mut self, layer: M) -> Self {
        self.layers.push(Box::new(layer));
        self
    }

    /// Appends a closure after all the current layers.
    pub fn add_fn<F>(self, f: F) -> Self
    where
        F: 'static + Fn(&Tensor) -> Result<Tensor> + Send + Sync,
    {
        self.add(super::func(f))
    }

    /// Applies the forward pass and returns the output for each layer.
    pub fn forward_all(&self, xs: &Tensor) -> Result<Vec<Tensor>> {
        let mut vec = Vec::with_capacity(self.layers.len());
        let mut xs = xs.clone();
        for layer in self.layers.iter() {
            xs = layer.forward(&xs)?;
            vec.push(xs.clone())
        }
        Ok(vec)
    }
}
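// A minimal usage sketch (illustrative only: `affine` and `relu` are standard candle tensor
// ops; the values are made up). The two closure layers below compute relu(2x + 1):
fn sequential_demo() -> Result<Tensor> {
    use candle::{DType, Device};
    let model = seq()
        .add_fn(|x| x.affine(2.0, 1.0)) // y = 2x + 1
        .add_fn(|x| x.relu());
    let xs = Tensor::ones((1, 4), DType::F32, &Device::Cpu)?;
    // `Sequential` implements `Module`, so the layers run in insertion order.
    model.forward(&xs)
}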
candle/candle-nn/src/sequential.rs/0
{ "file_path": "candle/candle-nn/src/sequential.rs", "repo_id": "candle", "token_count": 705 }
39
//
// WARNING: This file is automatically generated! Please edit onnx.in.proto.
//

// SPDX-License-Identifier: Apache-2.0

syntax = "proto3";

package onnx;

// Overview
//
// ONNX is an open specification that is comprised of the following components:
//
// 1) A definition of an extensible computation graph model.
// 2) Definitions of standard data types.
// 3) Definitions of built-in operators.
//
// This document describes the syntax of models and their computation graphs,
// as well as the standard data types. Together, they are referred to as the ONNX
// Intermediate Representation, or 'IR' for short.
//
// The normative semantic specification of the ONNX IR is found in docs/IR.md.
// Definitions of the built-in neural network operators may be found in docs/Operators.md.

// Notes
//
// Protobuf compatibility
//
// To simplify framework compatibility, ONNX is defined using the subset of protobuf
// that is compatible with both protobuf v2 and v3. This means that we do not use any
// protobuf features that are only available in one of the two versions.
//
// Here are the most notable contortions we have to carry out to work around
// these limitations:
//
//   - No 'map' (added protobuf 3.0). We instead represent mappings as lists
//     of key-value pairs, where order does not matter and duplicates
//     are not allowed.

// Versioning
//
// ONNX versioning is specified in docs/IR.md and elaborated on in docs/Versioning.md
//
// To be compatible with both proto2 and proto3, we will use a version number
// that is not defined by the default value but an explicit enum number.
enum Version {
  // proto3 requires the first enum value to be zero.
  // We add this just to appease the compiler.
  _START_VERSION = 0;
  // The version field is always serialized and we will use it to store the
  // version that the graph is generated from. This helps us set up version
  // control.
  // For the IR, we are using simple numbers starting with 0x00000001,
  // which was the version we published on Oct 10, 2017.
  IR_VERSION_2017_10_10 = 0x0000000000000001;

  // IR_VERSION 2 published on Oct 30, 2017
  // - Added type discriminator to AttributeProto to support proto3 users
  IR_VERSION_2017_10_30 = 0x0000000000000002;

  // IR VERSION 3 published on Nov 3, 2017
  // - For operator versioning:
  //    - Added new message OperatorSetIdProto
  //    - Added opset_import in ModelProto
  // - For vendor extensions, added domain in NodeProto
  IR_VERSION_2017_11_3 = 0x0000000000000003;

  // IR VERSION 4 published on Jan 22, 2019
  // - Relax constraint that initializers should be a subset of graph inputs
  // - Add type BFLOAT16
  IR_VERSION_2019_1_22 = 0x0000000000000004;

  // IR VERSION 5 published on March 18, 2019
  // - Add message TensorAnnotation.
  // - Add quantization annotation in GraphProto to map tensor with its scale and zero point quantization parameters.
  IR_VERSION_2019_3_18 = 0x0000000000000005;

  // IR VERSION 6 published on Sep 19, 2019
  // - Add support for sparse tensor constants stored in model.
  //   - Add message SparseTensorProto
  //   - Add sparse initializers
  IR_VERSION_2019_9_19 = 0x0000000000000006;

  // IR VERSION 7 published on May 8, 2020
  // - Add support to allow function body graph to rely on multiple external operator sets.
  // - Add a list to promote inference graph's initializers to global and
  //   mutable variables. Global variables are visible in all graphs of the
  //   stored models.
  // - Add message TrainingInfoProto to store initialization
  //   method and training algorithm. The execution of TrainingInfoProto
  //   can modify the values of mutable variables.
  // - Implicitly add inference graph into each TrainingInfoProto's algorithm.
  IR_VERSION_2020_5_8 = 0x0000000000000007;

  // IR VERSION 8 published on July 30, 2021
  // Introduce TypeProto.SparseTensor
  // Introduce TypeProto.Optional
  // Added a list of FunctionProtos local to the model
  // Deprecated since_version and operator status from FunctionProto
  IR_VERSION_2021_7_30 = 0x0000000000000008;

  // IR VERSION 9 published on May 5, 2023
  // Added AttributeProto to FunctionProto so that default attribute values can be set.
  // Added FLOAT8E4M3FN, FLOAT8E4M3FNUZ, FLOAT8E5M2, FLOAT8E5M2FNUZ.
  IR_VERSION = 0x0000000000000009;
}

// Attributes
//
// A named attribute containing either singular float, integer, string, graph,
// and tensor values, or repeated float, integer, string, graph, and tensor values.
// An AttributeProto MUST contain the name field, and *only one* of the
// following content fields, effectively enforcing a C/C++ union equivalent.
message AttributeProto {
  reserved 12, 16 to 19;
  reserved "v";

  // Note: this enum is structurally identical to the OpSchema::AttrType
  // enum defined in schema.h. If you rev one, you likely need to rev the other.
  enum AttributeType {
    UNDEFINED = 0;
    FLOAT = 1;
    INT = 2;
    STRING = 3;
    TENSOR = 4;
    GRAPH = 5;
    SPARSE_TENSOR = 11;
    TYPE_PROTO = 13;

    FLOATS = 6;
    INTS = 7;
    STRINGS = 8;
    TENSORS = 9;
    GRAPHS = 10;
    SPARSE_TENSORS = 12;
    TYPE_PROTOS = 14;
  }

  // The name field MUST be present for this version of the IR.
  string name = 1;           // namespace Attribute

  // if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function.
  // In this case, this AttributeProto does not contain data, and it's a reference of attribute
  // in parent scope.
  // NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph.
  string ref_attr_name = 21;

  // A human-readable documentation for this attribute. Markdown is allowed.
  string doc_string = 13;

  // The type field MUST be present for this version of the IR.
  // For 0.0.1 versions of the IR, this field was not defined, and
  // implementations needed to use has_field heuristics to determine
  // which value field was in use. For IR_VERSION 0.0.2 or later, this
  // field MUST be set and match the f|i|s|t|... field in use. This
  // change was made to accommodate proto3 implementations.
  AttributeType type = 20;   // discriminator that indicates which field below is in use

  // Exactly ONE of the following fields must be present for this version of the IR
  float f = 2;               // float
  int64 i = 3;               // int
  bytes s = 4;               // UTF-8 string
  TensorProto t = 5;         // tensor value
  GraphProto g = 6;          // graph
  SparseTensorProto sparse_tensor = 22;  // sparse tensor value
  // Do not use field below, it's deprecated.
  // optional ValueProto v = 12;         // value - subsumes everything but graph
  TypeProto tp = 14;         // type proto

  repeated float floats = 7;           // list of floats
  repeated int64 ints = 8;             // list of ints
  repeated bytes strings = 9;          // list of UTF-8 strings
  repeated TensorProto tensors = 10;   // list of tensors
  repeated GraphProto graphs = 11;     // list of graph
  repeated SparseTensorProto sparse_tensors = 23;  // list of sparse tensors
  repeated TypeProto type_protos = 15; // list of type protos
}

// Defines information on value, including the name, the type, and
// the shape of the value.
message ValueInfoProto {
  // This field MUST be present in this version of the IR.
  string name = 1;     // namespace Value
  // This field MUST be present in this version of the IR for
  // inputs and outputs of the top-level graph.
  TypeProto type = 2;
  // A human-readable documentation for this value. Markdown is allowed.
  string doc_string = 3;
}

// Nodes
//
// Computation graphs are made up of a DAG of nodes, which represent what is
// commonly called a "layer" or "pipeline stage" in machine learning frameworks.
//
// For example, it can be a node of type "Conv" that takes in an image, a filter
// tensor and a bias tensor, and produces the convolved output.
message NodeProto {
  repeated string input = 1;    // namespace Value
  repeated string output = 2;   // namespace Value

  // An optional identifier for this node in a graph.
  // This field MAY be absent in this version of the IR.
  string name = 3;     // namespace Node

  // The symbolic identifier of the Operator to execute.
  string op_type = 4;  // namespace Operator
  // The domain of the OperatorSet that specifies the operator named by op_type.
  string domain = 7;   // namespace Domain

  // Additional named attributes.
  repeated AttributeProto attribute = 5;

  // A human-readable documentation for this node. Markdown is allowed.
  string doc_string = 6;
}

// Training information
// TrainingInfoProto stores information for training a model.
// In particular, this defines two functionalities: an initialization-step
// and a training-algorithm-step. Initialization resets the model
// back to its original state as if no training has been performed.
// Training algorithm improves the model based on input data.
//
// The semantics of the initialization-step is that the initializers
// in ModelProto.graph and in TrainingInfoProto.algorithm are first
// initialized as specified by the initializers in the graph, and then
// updated by the "initialization_binding" in every instance in
// ModelProto.training_info.
//
// The field "algorithm" defines a computation graph which represents a
// training algorithm's step. After the execution of a
// TrainingInfoProto.algorithm, the initializers specified by "update_binding"
// may be immediately updated. If the targeted training algorithm contains
// consecutive update steps (such as block coordinate descent methods),
// the user needs to create a TrainingInfoProto for each step.
message TrainingInfoProto {
  // This field describes a graph to compute the initial tensors
  // upon starting the training process. Initialization graph has no input
  // and can have multiple outputs. Usually, trainable tensors in neural
  // networks are randomly initialized. To achieve that, for each tensor,
  // the user can put a random number operator such as RandomNormal or
  // RandomUniform in TrainingInfoProto.initialization.node and assign its
  // random output to the specific tensor using "initialization_binding".
  // This graph can also set the initializers in "algorithm" in the same
  // TrainingInfoProto; a use case is resetting the number of training
  // iterations to zero.
  //
  // By default, this field is an empty graph and its evaluation does not
  // produce any output. Thus, no initializer would be changed by default.
  GraphProto initialization = 1;

  // This field represents a training algorithm step. Given required inputs,
  // it computes outputs to update initializers in its own or inference graph's
  // initializer lists. In general, this field contains loss node, gradient node,
  // optimizer node, increment of iteration count.
  //
  // An execution of the training algorithm step is performed by executing the
  // graph obtained by combining the inference graph (namely "ModelProto.graph")
  // and the "algorithm" graph. That is, the actual
  // input/initializer/output/node/value_info/sparse_initializer list of
  // the training graph is the concatenation of
  // "ModelProto.graph.input/initializer/output/node/value_info/sparse_initializer"
  // and "algorithm.input/initializer/output/node/value_info/sparse_initializer"
  // in that order. This combined graph must satisfy the normal ONNX conditions.
  // Now, let's provide a visualization of graph combination for clarity.
  // Let the inference graph (i.e., "ModelProto.graph") be
  //    tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d
  // and the "algorithm" graph be
  //    tensor_d -> Add -> tensor_e
  // The combination process results in
  //    tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d -> Add -> tensor_e
  //
  // Notice that an input of a node in the "algorithm" graph may reference the
  // output of a node in the inference graph (but not the other way round). Also, an inference
  // node cannot reference inputs of "algorithm". With these restrictions, the inference graph
  // can always be run independently without training information.
  //
  // By default, this field is an empty graph and its evaluation does not
  // produce any output. Evaluating the default training step never
  // updates any initializers.
  GraphProto algorithm = 2;

  // This field specifies the bindings from the outputs of "initialization" to
  // some initializers in "ModelProto.graph.initializer" and
  // the "algorithm.initializer" in the same TrainingInfoProto.
  // See "update_binding" below for details.
  //
  // By default, this field is empty and no initializer would be changed
  // by the execution of "initialization".
  repeated StringStringEntryProto initialization_binding = 3;

  // Gradient-based training is usually an iterative procedure. In one gradient
  // descent iteration, we apply
  //
  // x = x - r * g
  //
  // where "x" is the optimized tensor, "r" stands for learning rate, and "g" is
  // the gradient of "x" with respect to a chosen loss. To avoid adding assignments
  // into the training graph, we split the update equation into
  //
  // y = x - r * g
  // x = y
  //
  // The user needs to save "y = x - r * g" into TrainingInfoProto.algorithm. To
  // tell that "y" should be assigned to "x", the field "update_binding" may
  // contain a key-value pair of strings, "x" (key of StringStringEntryProto)
  // and "y" (value of StringStringEntryProto).
  // For a neural network with multiple trainable (mutable) tensors, there can
  // be multiple key-value pairs in "update_binding".
  //
  // The initializers that appear as keys in "update_binding" are considered
  // mutable variables. This implies some behaviors
  // as described below.
  //
  //  1. We have only unique keys in all "update_binding"s so that two
  //     variables may not have the same name. This ensures that one
  //     variable is assigned at most once.
  //  2. The keys must appear in names of "ModelProto.graph.initializer" or
  //     "TrainingInfoProto.algorithm.initializer".
  //  3. The values must be output names of "algorithm" or "ModelProto.graph.output".
  //  4. Mutable variables are initialized to the value specified by the
  //     corresponding initializer, and then potentially updated by
  //     "initializer_binding"s and "update_binding"s in "TrainingInfoProto"s.
  //
  // This field usually contains names of trainable tensors
  // (in ModelProto.graph), optimizer states such as momentums in advanced
  // stochastic gradient methods (in TrainingInfoProto.graph),
  // and number of training iterations (in TrainingInfoProto.graph).
  //
  // By default, this field is empty and no initializer would be changed
  // by the execution of "algorithm".
  repeated StringStringEntryProto update_binding = 4;
}

// Models
//
// ModelProto is a top-level file/container format for bundling an ML model and
// associating its computation graph with metadata.
//
// The semantics of the model are described by the associated GraphProto's.
message ModelProto {
  // The version of the IR this model targets. See Version enum above.
  // This field MUST be present.
  int64 ir_version = 1;

  // The OperatorSets this model relies on.
  // All ModelProtos MUST have at least one entry that
  // specifies which version of the ONNX OperatorSet is
  // being imported.
  //
  // All nodes in the ModelProto's graph will bind against the
  // same-domain/same-op_type operator with the HIGHEST version
  // in the referenced operator sets.
  repeated OperatorSetIdProto opset_import = 8;

  // The name of the framework or tool used to generate this model.
  // This field SHOULD be present to indicate which implementation/tool/framework
  // emitted the model.
  string producer_name = 2;

  // The version of the framework or tool used to generate this model.
  // This field SHOULD be present to indicate which implementation/tool/framework
  // emitted the model.
  string producer_version = 3;

  // Domain name of the model.
  // We use reverse domain names as name space indicators. For example:
  // `com.facebook.fair` or `com.microsoft.cognitiveservices`
  //
  // Together with `model_version` and GraphProto.name, this forms the unique identity of
  // the graph.
  string domain = 4;

  // The version of the graph encoded. See Version enum below.
  int64 model_version = 5;

  // A human-readable documentation for this model. Markdown is allowed.
  string doc_string = 6;

  // The parameterized graph that is evaluated to execute the model.
  GraphProto graph = 7;

  // Named metadata values; keys should be distinct.
  repeated StringStringEntryProto metadata_props = 14;

  // Training-specific information. Sequentially executing all stored
  // `TrainingInfoProto.algorithm`s and assigning their outputs following
  // the corresponding `TrainingInfoProto.update_binding`s is one training
  // iteration. Similarly, to initialize the model
  // (as if training hasn't happened), the user should sequentially execute
  // all stored `TrainingInfoProto.initialization`s and assign their outputs
  // using `TrainingInfoProto.initialization_binding`s.
  //
  // If this field is empty, the training behavior of the model is undefined.
  repeated TrainingInfoProto training_info = 20;

  // A list of function protos local to the model.
  //
  // Name of the function "FunctionProto.name" should be unique within the domain "FunctionProto.domain".
  // In case of any conflicts the behavior (whether the model local functions are given higher priority,
  // or standard operator sets are given higher priority, or this is treated as an error) is defined by
  // the runtimes.
  //
  // The operator sets imported by FunctionProto should be compatible with the ones
  // imported by ModelProto and other model local FunctionProtos.
  // For example, if the same operator set, say 'A', is imported by a FunctionProto and the ModelProto
  // or by 2 FunctionProtos, then the imported versions of the operator set may differ, but
  // the operator schema returned for an op_type, domain, version combination
  // should be the same for both versions, for every node in the function body.
  //
  // One FunctionProto can reference other FunctionProtos in the model; however, recursive references
  // are not allowed.
  repeated FunctionProto functions = 25;
};

// StringStringEntryProto follows the pattern for cross-proto-version maps.
// See https://developers.google.com/protocol-buffers/docs/proto3#maps
message StringStringEntryProto {
  string key = 1;
  string value = 2;
};

message TensorAnnotation {
  string tensor_name = 1;
  // <key, value> pairs to annotate the tensor specified by <tensor_name> above.
  // The keys used in the mapping below must be pre-defined in the ONNX spec.
  // For example, for the 8-bit linear quantization case, 'SCALE_TENSOR', 'ZERO_POINT_TENSOR' will be pre-defined as
  // quantization parameter keys.
  repeated StringStringEntryProto quant_parameter_tensor_names = 2;
}

// Graphs
//
// A graph defines the computational logic of a model and is comprised of a parameterized
// list of nodes that form a directed acyclic graph based on their inputs and outputs.
// This is the equivalent of the "network" or "graph" in many deep learning
// frameworks.
message GraphProto {
  // The nodes in the graph, sorted topologically.
  repeated NodeProto node = 1;

  // The name of the graph.
  string name = 2;   // namespace Graph

  // A list of named tensor values, used to specify constant inputs of the graph.
  // Each initializer (both TensorProto as well as SparseTensorProto) MUST have a name.
  // The name MUST be unique across both initializer and sparse_initializer,
  // but the name MAY also appear in the input list.
  repeated TensorProto initializer = 5;

  // Initializers (see above) stored in sparse format.
  repeated SparseTensorProto sparse_initializer = 15;

  // A human-readable documentation for this graph. Markdown is allowed.
  string doc_string = 10;

  // The inputs and outputs of the graph.
  repeated ValueInfoProto input = 11;
  repeated ValueInfoProto output = 12;

  // Information for the values in the graph. The ValueInfoProto.name's
  // must be distinct. It is optional for a value to appear in the value_info list.
  repeated ValueInfoProto value_info = 13;

  // This field carries information to indicate the mapping between a tensor and its
  // quantization parameter tensors. For example:
  // For tensor 'a', it may have {'SCALE_TENSOR', 'a_scale'} and {'ZERO_POINT_TENSOR', 'a_zero_point'} annotated,
  // which means tensor 'a_scale' and tensor 'a_zero_point' are the scale and zero point of tensor 'a' in the model.
  repeated TensorAnnotation quantization_annotation = 14;

  reserved 3, 4, 6 to 9;
  reserved "ir_version", "producer_version", "producer_tag", "domain";
}

// Tensors
//
// A serialized tensor value.
message TensorProto {
  enum DataType {
    UNDEFINED = 0;
    // Basic types.
    FLOAT = 1;   // float
    UINT8 = 2;   // uint8_t
    INT8 = 3;    // int8_t
    UINT16 = 4;  // uint16_t
    INT16 = 5;   // int16_t
    INT32 = 6;   // int32_t
    INT64 = 7;   // int64_t
    STRING = 8;  // string
    BOOL = 9;    // bool

    // IEEE754 half-precision floating-point format (16 bits wide).
    // This format has 1 sign bit, 5 exponent bits, and 10 mantissa bits.
FLOAT16 = 10; DOUBLE = 11; UINT32 = 12; UINT64 = 13; COMPLEX64 = 14; // complex with float32 real and imaginary components COMPLEX128 = 15; // complex with float64 real and imaginary components // Non-IEEE floating-point format based on IEEE754 single-precision // floating-point number truncated to 16 bits. // This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits. BFLOAT16 = 16; // Non-IEEE floating-point format based on papers // FP8 Formats for Deep Learning, https://arxiv.org/abs/2209.05433, // 8-bit Numerical Formats For Deep Neural Networks, https://arxiv.org/pdf/2206.02915.pdf. // Operators supported FP8 are Cast, CastLike, QuantizeLinear, DequantizeLinear. // The computation usually happens inside a block quantize / dequantize // fused by the runtime. FLOAT8E4M3FN = 17; // float 8, mostly used for coefficients, supports nan, not inf FLOAT8E4M3FNUZ = 18; // float 8, mostly used for coefficients, supports nan, not inf, no negative zero FLOAT8E5M2 = 19; // follows IEEE 754, supports nan, inf, mostly used for gradients FLOAT8E5M2FNUZ = 20; // follows IEEE 754, supports nan, inf, mostly used for gradients, no negative zero // Future extensions go here. } // The shape of the tensor. repeated int64 dims = 1; // The data type of the tensor. // This field MUST have a valid TensorProto.DataType value int32 data_type = 2; // For very large tensors, we may want to store them in chunks, in which // case the following fields will specify the segment that is stored in // the current TensorProto. message Segment { int64 begin = 1; int64 end = 2; } Segment segment = 3; // Tensor content must be organized in row-major order. // // Depending on the data_type field, exactly one of the fields below with // name ending in _data is used to store the elements of the tensor. // For float and complex64 values // Complex64 tensors are encoded as a single array of floats, // with the real components appearing in odd numbered positions, // and the corresponding imaginary component appearing in the // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i] // is encoded as [1.0, 2.0 ,3.0 ,4.0] // When this field is present, the data_type field MUST be FLOAT or COMPLEX64. repeated float float_data = 4 [packed = true]; // For int32, uint8, int8, uint16, int16, bool, float8, and float16 values // float16 and float8 values must be bit-wise converted to an uint16_t prior // to writing to the buffer. // When this field is present, the data_type field MUST be // INT32, INT16, INT8, UINT16, UINT8, BOOL, FLOAT16, BFLOAT16, FLOAT8E4M3FN, FLOAT8E4M3FNUZ, FLOAT8E5M2, FLOAT8E5M2FNUZ repeated int32 int32_data = 5 [packed = true]; // For strings. // Each element of string_data is a UTF-8 encoded Unicode // string. No trailing null, no leading BOM. The protobuf "string" // scalar type is not used to match ML community conventions. // When this field is present, the data_type field MUST be STRING repeated bytes string_data = 6; // For int64. // When this field is present, the data_type field MUST be INT64 repeated int64 int64_data = 7 [packed = true]; // Optionally, a name for the tensor. string name = 8; // namespace Value // A human-readable documentation for this tensor. Markdown is allowed. string doc_string = 12; // Serializations can either use one of the fields above, or use this // raw bytes field. The only exception is the string case, where one is // required to store the content in the repeated bytes string_data field. 
// // When this raw_data field is used to store tensor value, elements MUST // be stored in as fixed-width, little-endian order. // Floating-point data types MUST be stored in IEEE 754 format. // Complex64 elements must be written as two consecutive FLOAT values, real component first. // Complex128 elements must be written as two consecutive DOUBLE values, real component first. // Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false). // // Note: the advantage of specific field rather than the raw_data field is // that in some cases (e.g. int data), protobuf does a better packing via // variable length storage, and may lead to smaller binary footprint. // When this field is present, the data_type field MUST NOT be STRING or UNDEFINED bytes raw_data = 9; // Data can be stored inside the protobuf file using type-specific fields or raw_data. // Alternatively, raw bytes data can be stored in an external file, using the external_data field. // external_data stores key-value pairs describing data location. Recognized keys are: // - "location" (required) - POSIX filesystem path relative to the directory where the ONNX // protobuf model was stored // - "offset" (optional) - position of byte at which stored data begins. Integer stored as string. // Offset values SHOULD be multiples 4096 (page size) to enable mmap support. // - "length" (optional) - number of bytes containing data. Integer stored as string. // - "checksum" (optional) - SHA1 digest of file specified in under 'location' key. repeated StringStringEntryProto external_data = 13; // Location of the data for this tensor. MUST be one of: // - DEFAULT - data stored inside the protobuf message. Data is stored in raw_data (if set) otherwise in type-specified field. // - EXTERNAL - data stored in an external location as described by external_data field. enum DataLocation { DEFAULT = 0; EXTERNAL = 1; } // If value not set, data is stored in raw_data (if set) otherwise in type-specified field. DataLocation data_location = 14; // For double // Complex128 tensors are encoded as a single array of doubles, // with the real components appearing in odd numbered positions, // and the corresponding imaginary component appearing in the // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i] // is encoded as [1.0, 2.0 ,3.0 ,4.0] // When this field is present, the data_type field MUST be DOUBLE or COMPLEX128 repeated double double_data = 10 [packed = true]; // For uint64 and uint32 values // When this field is present, the data_type field MUST be // UINT32 or UINT64 repeated uint64 uint64_data = 11 [packed = true]; } // A serialized sparse-tensor value message SparseTensorProto { // The sequence of non-default values are encoded as a tensor of shape [NNZ]. // The default-value is zero for numeric tensors, and empty-string for string tensors. // values must have a non-empty name present which serves as a name for SparseTensorProto // when used in sparse_initializer list. TensorProto values = 1; // The indices of the non-default values, which may be stored in one of two formats. // (a) Indices can be a tensor of shape [NNZ, rank] with the [i,j]-th value // corresponding to the j-th index of the i-th value (in the values tensor). // (b) Indices can be a tensor of shape [NNZ], in which case the i-th value // must be the linearized-index of the i-th value (in the values tensor). // The linearized-index can be converted into an index tuple (k_1,...,k_rank) // using the shape provided below. 
// The indices must appear in ascending order without duplication. // In the first format, the ordering is lexicographic-ordering: // e.g., index-value [1,4] must appear before [2,1] TensorProto indices = 2; // The shape of the underlying dense-tensor: [dim_1, dim_2, ... dim_rank] repeated int64 dims = 3; } // Defines a tensor shape. A dimension can be either an integer value // or a symbolic variable. A symbolic variable represents an unknown // dimension. message TensorShapeProto { message Dimension { oneof value { int64 dim_value = 1; string dim_param = 2; // namespace Shape }; // Standard denotation can optionally be used to denote tensor // dimensions with standard semantic descriptions to ensure // that operations are applied to the correct axis of a tensor. // Refer to https://github.com/onnx/onnx/blob/main/docs/DimensionDenotation.md#denotation-definition // for pre-defined dimension denotations. string denotation = 3; }; repeated Dimension dim = 1; } // Types // // The standard ONNX data types. message TypeProto { message Tensor { // This field MUST NOT have the value of UNDEFINED // This field MUST have a valid TensorProto.DataType value // This field MUST be present for this version of the IR. int32 elem_type = 1; TensorShapeProto shape = 2; } // repeated T message Sequence { // The type and optional shape of each element of the sequence. // This field MUST be present for this version of the IR. TypeProto elem_type = 1; }; // map<K,V> message Map { // This field MUST have a valid TensorProto.DataType value // This field MUST be present for this version of the IR. // This field MUST refer to an integral type ([U]INT{8|16|32|64}) or STRING int32 key_type = 1; // This field MUST be present for this version of the IR. TypeProto value_type = 2; }; // wrapper for Tensor, Sequence, or Map message Optional { // The type and optional shape of the element wrapped. // This field MUST be present for this version of the IR. // Possible values correspond to OptionalProto.DataType enum TypeProto elem_type = 1; }; message SparseTensor { // This field MUST NOT have the value of UNDEFINED // This field MUST have a valid TensorProto.DataType value // This field MUST be present for this version of the IR. int32 elem_type = 1; TensorShapeProto shape = 2; } oneof value { // The type of a tensor. Tensor tensor_type = 1; // NOTE: DNN-only implementations of ONNX MAY elect to not support non-tensor values // as input and output to graphs and nodes. These types are needed to naturally // support classical ML operators. DNN operators SHOULD restrict their input // and output types to tensors. // The type of a sequence. Sequence sequence_type = 4; // The type of a map. Map map_type = 5; // The type of an optional. Optional optional_type = 9; // Type of the sparse tensor SparseTensor sparse_tensor_type = 8; } // An optional denotation can be used to denote the whole // type with a standard semantic description as to what is // stored inside. Refer to https://github.com/onnx/onnx/blob/main/docs/TypeDenotation.md#type-denotation-definition // for pre-defined type denotations. string denotation = 6; } // Operator Sets // // OperatorSets are uniquely identified by a (domain, opset_version) pair. message OperatorSetIdProto { // The domain of the operator set being identified. // The empty string ("") or absence of this field implies the operator // set that is defined as part of the ONNX specification. // This field MUST be present in this version of the IR when referring to any other operator set. 
string domain = 1; // The version of the operator set being identified. // This field MUST be present in this version of the IR. int64 version = 2; } // Operator/function status. enum OperatorStatus { EXPERIMENTAL = 0; STABLE = 1; } message FunctionProto { // The name of the function, similar usage of op_type in OperatorProto. // Combined with FunctionProto.domain, this forms the unique identity of // the FunctionProto. string name = 1; // Deprecated since IR Version 8 // optional int64 since_version = 2; reserved 2; reserved "since_version"; // Deprecated since IR Version 8 // optional OperatorStatus status = 3; reserved 3; reserved "status"; // The inputs and outputs of the function. repeated string input = 4; repeated string output = 5; // The attribute parameters of the function. // It is for function parameters without default values. repeated string attribute = 6; // The attribute protos of the function. // It is for function attributes with default values. // A function attribute shall be represented either as // a string attribute or an AttributeProto, not both. repeated AttributeProto attribute_proto = 11; // The nodes in the function. repeated NodeProto node = 7; // A human-readable documentation for this function. Markdown is allowed. string doc_string = 8; // The OperatorSets this function body (graph) relies on. // // All nodes in the function body (graph) will bind against the operator // with the same-domain/same-op_type operator with the HIGHEST version // in the referenced operator sets. This means at most one version can be relied // for one domain. // // The operator sets imported by FunctionProto should be compatible with the ones // imported by ModelProto. Example, if same operator set say 'A' is imported by FunctionProto // and ModelProto then versions for the operator set may be different but, // the operator schema returned for op_type, domain, version combination // for both the versions should be same. repeated OperatorSetIdProto opset_import = 9; // The domain which this function belongs to. Combined with FunctionProto.name, this forms the unique identity of // the FunctionProto. string domain = 10; } // For using protobuf-lite option optimize_for = LITE_RUNTIME;
candle/candle-onnx/src/onnx.proto3/0
{ "file_path": "candle/candle-onnx/src/onnx.proto3", "repo_id": "candle", "token_count": 10183 }
40
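For orientation, the messages in the schema above compile to ordinary Rust structs when run through a protobuf code generator such as prost (candle-onnx exposes such a generated module). The sketch below is illustrative only: it assumes prost's default mapping (repeated fields become Vec, message fields become Option, every generated struct implements Default), and the `onnx::` module path and IR/opset version numbers are assumptions of this example, not values mandated by the schema.

// Minimal sketch, assuming prost-generated bindings for the schema above
// live in a module named `onnx` (candle-onnx generates an equivalent module).
use onnx::{GraphProto, ModelProto, NodeProto, OperatorSetIdProto, ValueInfoProto};

fn tiny_identity_model() -> ModelProto {
    // A single-node graph: output = Identity(input).
    let node = NodeProto {
        input: vec!["input".to_string()],
        output: vec!["output".to_string()],
        name: "identity_0".to_string(),
        op_type: "Identity".to_string(),
        domain: String::new(), // empty string selects the default ONNX operator set
        ..Default::default()
    };
    let graph = GraphProto {
        node: vec![node],
        name: "tiny".to_string(),
        input: vec![ValueInfoProto { name: "input".to_string(), ..Default::default() }],
        output: vec![ValueInfoProto { name: "output".to_string(), ..Default::default() }],
        ..Default::default()
    };
    ModelProto {
        // ir_version and opset version are example values; real models set the
        // versions they actually target (both fields MUST be present per the spec).
        ir_version: 8,
        opset_import: vec![OperatorSetIdProto { domain: String::new(), version: 19 }],
        graph: Some(graph),
        ..Default::default()
    }
}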
# Generated content DO NOT EDIT
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Sequence
from os import PathLike
from candle.typing import _ArrayLike, Device, Scalar, Index, Shape
from candle import Tensor, DType, QTensor

@staticmethod
def silu(tensor: Tensor) -> Tensor:
    """
    Applies the Sigmoid Linear Unit (SiLU) function to a given tensor.
    """
    pass

@staticmethod
def softmax(tensor: Tensor, dim: int) -> Tensor:
    """
    Applies the Softmax function to a given tensor along the given dimension.
    """
    pass
candle/candle-pyo3/py_src/candle/nn/__init__.pyi/0
{ "file_path": "candle/candle-pyo3/py_src/candle/nn/__init__.pyi", "repo_id": "candle", "token_count": 181 }
41
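The two functions stubbed above have native counterparts in candle_nn::ops on the Rust side. The snippet below is a minimal sketch of the same semantics, assuming only the public candle / candle_nn APIs: silu(x) = x * sigmoid(x) applied element-wise, and softmax exponentiates and normalizes along one dimension so each slice sums to 1.

use candle::{Device, Result, Tensor, D};
use candle_nn::ops;

fn main() -> Result<()> {
    let dev = Device::Cpu;
    let xs = Tensor::new(&[[1f32, 2., 3.], [4., 5., 6.]], &dev)?;

    // SiLU: x * sigmoid(x), applied element-wise.
    let silu = ops::silu(&xs)?;

    // Softmax along the last dimension: every row sums to 1.
    let sm = ops::softmax(&xs, D::Minus1)?;

    println!("silu: {silu}");
    println!("softmax row sums: {}", sm.sum(D::Minus1)?);
    Ok(())
}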
use ::candle::Tensor; use pyo3::prelude::*; #[derive(Clone, Debug)] /// Represents an absolute shape e.g. (1, 2, 3) pub struct PyShape(Vec<usize>); impl<'source> pyo3::FromPyObject<'source> for PyShape { fn extract(ob: &'source PyAny) -> PyResult<Self> { if ob.is_none() { return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>( "Shape cannot be None", )); } let tuple = ob.downcast::<pyo3::types::PyTuple>()?; if tuple.len() == 1 { let first_element = tuple.get_item(0)?; let dims: Vec<usize> = pyo3::FromPyObject::extract(first_element)?; Ok(PyShape(dims)) } else { let dims: Vec<usize> = pyo3::FromPyObject::extract(tuple)?; Ok(PyShape(dims)) } } } impl From<PyShape> for ::candle::Shape { fn from(val: PyShape) -> Self { val.0.into() } } #[derive(Clone, Debug)] /// Represents a shape with a hole in it e.g. (1, -1, 3) pub struct PyShapeWithHole(Vec<isize>); impl<'source> pyo3::FromPyObject<'source> for PyShapeWithHole { fn extract(ob: &'source PyAny) -> PyResult<Self> { if ob.is_none() { return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>( "Shape cannot be None", )); } let tuple = ob.downcast::<pyo3::types::PyTuple>()?; let dims: Vec<isize> = if tuple.len() == 1 { let first_element = tuple.get_item(0)?; pyo3::FromPyObject::extract(first_element)? } else { pyo3::FromPyObject::extract(tuple)? }; // Ensure we have only positive numbers and at most one "hole" (-1) let negative_ones = dims.iter().filter(|&&x| x == -1).count(); let any_invalid_dimensions = dims.iter().any(|&x| x < -1 || x == 0); if negative_ones > 1 || any_invalid_dimensions { return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>(format!( "Invalid dimension in shape: {:?}", dims ))); } Ok(PyShapeWithHole(dims)) } } impl PyShapeWithHole { /// Returns `true` if the shape is absolute e.g. (1, 2, 3) pub fn is_absolute(&self) -> bool { self.0.iter().all(|x| *x > 0) } /// Convert a relative shape to an absolute shape e.g. (1, -1) -> (1, 12) pub fn to_absolute(&self, t: &Tensor) -> PyResult<PyShape> { if self.is_absolute() { return Ok(PyShape( self.0.iter().map(|x| *x as usize).collect::<Vec<usize>>(), )); } let mut elements = t.elem_count(); let mut new_dims: Vec<usize> = vec![]; for dim in self.0.iter() { if *dim > 0 { new_dims.push(*dim as usize); elements /= *dim as usize; } else if *dim == -1 { new_dims.push(elements); } else { return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>(format!( "Invalid dimension in shape: {}", dim ))); } } Ok(PyShape(new_dims)) } }
candle/candle-pyo3/src/shape.rs/0
{ "file_path": "candle/candle-pyo3/src/shape.rs", "repo_id": "candle", "token_count": 1646 }
42
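The hole resolution performed by PyShapeWithHole::to_absolute is plain arithmetic: divide the tensor's element count by the product of the known dimensions and place the quotient in the single -1 slot, e.g. (1, -1) against a 12-element tensor becomes (1, 12). Below is a dependency-free sketch of that rule; resolve_hole is a hypothetical helper written for illustration, not part of the bindings.

/// Resolve a shape containing at most one -1 "hole" against a total element count,
/// mirroring the intent of PyShapeWithHole::to_absolute: (1, -1) with 12 elements -> (1, 12).
fn resolve_hole(dims: &[isize], elem_count: usize) -> Option<Vec<usize>> {
    let known: usize = dims.iter().filter(|&&d| d > 0).map(|&d| d as usize).product();
    let holes = dims.iter().filter(|&&d| d == -1).count();
    if holes > 1 || dims.iter().any(|&d| d < -1 || d == 0) || elem_count % known != 0 {
        return None;
    }
    Some(
        dims.iter()
            .map(|&d| if d == -1 { elem_count / known } else { d as usize })
            .collect(),
    )
}

fn main() {
    assert_eq!(resolve_hole(&[1, -1], 12), Some(vec![1, 12]));
    assert_eq!(resolve_hole(&[3, -1, 2], 24), Some(vec![3, 4, 2]));
    assert_eq!(resolve_hole(&[-1, -1], 8), None); // at most one hole allowed
}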
//! Based from the Stanford Hazy Research group. //! //! See "Simple linear attention language models balance the recall-throughput tradeoff", Arora et al. 2024 //! <https://arxiv.org/abs/2402.18668> //! Original code: //! https://github.com/HazyResearch/based use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use candle_nn::{ conv1d_no_bias, linear, linear_no_bias, ops::softmax_last_dim, rms_norm, Conv1d, Conv1dConfig, Func, Linear, RmsNorm, VarBuilder, }; use std::sync::Arc; #[derive(Debug, Clone, serde::Deserialize)] pub struct LinearAttentionFeatureMapConfig { input_dim: usize, } #[derive(Debug, Clone, serde::Deserialize)] pub struct LinearAttentionConfig { num_heads: usize, feature_dim: usize, feature_map: LinearAttentionFeatureMapConfig, } #[derive(Debug, Clone, serde::Deserialize)] pub struct SlidingWindowAttentionConfig { num_heads: usize, window_size: usize, } #[derive(Debug, Clone, serde::Deserialize)] pub struct Config { vocab_size: usize, #[serde(rename = "n_embd")] hidden_size: usize, #[serde(rename = "n_inner")] intermediate_size: usize, #[serde(rename = "n_layer")] num_hidden_layers: usize, #[serde(rename = "n_head")] num_attention_heads: usize, layer_norm_epsilon: f64, #[serde(default = "default_rope", rename = "rotary_emb_base")] rope_theta: f64, alt_mixer_layers: Vec<usize>, alt_mixer_2_layers: Vec<usize>, #[serde(rename = "alt_mixer")] la: LinearAttentionConfig, #[serde(rename = "alt_mixer_2")] swa: SlidingWindowAttentionConfig, } fn default_rope() -> f64 { 10_000.0 } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { fc1: Linear, fc2: Linear, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let fc1 = linear_no_bias(cfg.hidden_size, cfg.hidden_size * 4, vb.pp("fc1"))?; let fc2 = linear_no_bias(cfg.intermediate_size, cfg.hidden_size, vb.pp("fc2"))?; Ok(Self { fc1, fc2 }) } } // Swiglu implementation. // Not using Activation::Swiglu because this has the gate and y arguments switched compared to the version in candle-nn/src/ops.rs fn swiglu(xs: &Tensor) -> Result<Tensor> { let xs = xs.chunk(2, D::Minus1)?; &xs[1].silu()? * &xs[0] } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = xs.apply(&self.fc1)?; let xs = swiglu(&xs)?; let xs = xs.apply(&self.fc2)?; Ok(xs) } } // A gated convolutional block. #[derive(Debug, Clone)] struct BasedConv { in_proj: Linear, out_proj: Linear, conv: Conv1d, state: Tensor, } impl BasedConv { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let dim = cfg.hidden_size * 2; let conv1d_cfg = Conv1dConfig { groups: dim, padding: 2, ..Default::default() }; let in_proj = linear(cfg.hidden_size, cfg.hidden_size * 4, vb.pp("in_proj"))?; let out_proj = linear(dim, cfg.hidden_size, vb.pp("out_proj"))?; let conv = conv1d_no_bias(dim, dim, 3, conv1d_cfg, vb.pp("conv.conv"))?; let state = Tensor::zeros((1, dim, 3), vb.dtype(), vb.device())?; Ok(Self { in_proj, out_proj, conv, state, }) } fn step(&mut self, xs: &Tensor) -> Result<Tensor> { self.state = self.state.roll(-1, D::Minus1)?; let (_, _, l) = self.state.dims3()?; self.state = self.state.narrow(D::Minus1, 0, l - 1)?; self.state = Tensor::cat(&[&self.state, &xs.transpose(1, 2)?], 2)?; let xs = (&self.state * self.conv.weight().permute((1, 0, 2))?)? .sum_keepdim(0)? 
.sum(D::Minus1)?; let xs = xs.unsqueeze(1)?; Ok(xs) } fn forward(&mut self, xs: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let xs = xs.apply(&self.in_proj)?; let us = xs.chunk(2, D::Minus1)?; let (_b, l, _d) = us[0].dims3()?; let u_conv = if seqlen_offset > 0 { self.step(&us[0])? } else { let k = std::cmp::min(3, l); self.state = self.state.narrow(D::Minus1, 0, 3 - k)?; let xs = us[0].narrow(1, l - k, k)?.transpose(1, 2)?; self.state = Tensor::cat(&[&self.state, &xs], 2)?; us[0] .transpose(1, 2)? .apply(&self.conv)? .narrow(D::Minus1, 0, l)? .transpose(1, 2)? }; let u_conv = u_conv.silu()?; let v = u_conv.broadcast_mul(&us[1])?; let xs = v.apply(&self.out_proj)?; Ok(xs) } } // Linear attention approximating softmax using second order Taylor polynomials. #[derive(Debug, Clone)] struct LinearAttention { proj_q: Linear, proj_k: Linear, proj_v: Linear, out_proj: Linear, feature_dim: usize, num_heads: usize, input_dim: usize, k_state: Tensor, kv_state: Tensor, } impl LinearAttention { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let input_dim = cfg.la.feature_map.input_dim; let out_proj = linear_no_bias(cfg.hidden_size, cfg.hidden_size, vb.pp("out_proj"))?; let proj_k = linear_no_bias( cfg.hidden_size, cfg.la.num_heads * cfg.la.feature_dim, vb.pp("proj_k"), )?; let proj_q = linear_no_bias( cfg.hidden_size, cfg.la.num_heads * cfg.la.feature_dim, vb.pp("proj_q"), )?; let proj_v = linear_no_bias(cfg.hidden_size, cfg.hidden_size, vb.pp("proj_v"))?; let expanded_size = cfg.la.feature_dim.pow(2) + cfg.la.feature_dim + 1; let k_state = Tensor::zeros( (1, cfg.la.num_heads, 1, 1, expanded_size), vb.dtype(), vb.device(), )?; let kv_state = Tensor::zeros( (1, cfg.la.num_heads, cfg.la.feature_dim, expanded_size), vb.dtype(), vb.device(), )?; Ok(Self { proj_q, proj_k, proj_v, out_proj, feature_dim: cfg.la.feature_dim, num_heads: cfg.la.num_heads, input_dim, k_state, kv_state, }) } fn taylor_expansion(&self) -> Result<Func<'static>> { let r2 = std::f64::consts::SQRT_2; let rd = (self.input_dim as f64).sqrt(); let rrd = rd.sqrt(); Ok(Func::new(move |xs| { let dims = xs.dims(); let mut d = dims.to_vec(); if let Some(last) = d.last_mut() { *last = 1; }; let x = xs .unsqueeze(D::Minus1)? .broadcast_mul(&xs.unsqueeze(D::Minus2)?)?; let x = (x.flatten_from(D::Minus2)? / r2)?; let o = Tensor::ones(d, xs.dtype(), xs.device())?; let x = Tensor::cat(&[o, (xs / rrd)?, (&x / rd)?], D::Minus1)?; Ok(x) })) } fn forward(&mut self, xs: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let eps = 1e-12; let feature_map = self.taylor_expansion()?; let (b, l, d) = xs.dims3()?; let q = xs.apply(&self.proj_q)?; let k = xs.apply(&self.proj_k)?; let v = xs.apply(&self.proj_v)?; let q = q .reshape((b, l, self.num_heads, self.feature_dim))? .transpose(1, 2)? .contiguous()?; let k = k .reshape((b, l, self.num_heads, self.feature_dim))? .transpose(1, 2)? .contiguous()?; let v = v .reshape((b, l, self.num_heads, d / self.num_heads))? .transpose(1, 2)? .contiguous()?; let q = feature_map.forward(&q)?; let k = feature_map.forward(&k)?; let y = if seqlen_offset > 0 { let (_b, _h, l, _d) = k.dims4()?; let q = q.unsqueeze(D::Minus2)?; let k = k.unsqueeze(D::Minus2)?; let v = v.unsqueeze(D::Minus1)?; let kn = k.narrow(D::Minus1, l - 1, 1)?; let vn = v.narrow(D::Minus1, l - 1, 1)?; self.k_state = self.k_state.broadcast_add(&kn)?; self.kv_state = self.kv_state.broadcast_add(&kn.broadcast_mul(&vn)?)?; let num = q.broadcast_mul(&self.kv_state)?.sum(D::Minus1)?; let den = (q.broadcast_mul(&self.k_state)?.sum(D::Minus1)? 
+ eps)?; num.broadcast_div(&den)? } else { self.k_state = k.sum(2)?.unsqueeze(2)?.unsqueeze(3)?; self.kv_state = k .transpose(2, 3)? .matmul(&v)? .transpose(2, 3)? .unsqueeze(2)?; let aqk = q.matmul(&k.transpose(D::Minus1, D::Minus2)?)?; let tril = Tensor::tril2(l, aqk.dtype(), aqk.device())?; let aqk = aqk.broadcast_mul(&tril)?.matmul(&v)?; let z = (1f64 / (q.mul(&k.cumsum(2)?)?.sum(D::Minus1)? + eps)?)?; aqk.broadcast_mul(&z.unsqueeze(D::Minus1)?)? }; let (b, h, l, d) = y.dims4()?; let y = y.permute((0, 2, 1, 3))?.reshape((b, l, h * d))?; let y = self.out_proj.forward(&y)?; Ok(y) } } // Rotary embeddings used in local attention. #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } impl RotaryEmbedding { fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> { let dim = cfg.hidden_size / cfg.num_attention_heads; let max_seq_len = 2048; // Hardcoded, missing from config. let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(dtype)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?; let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?; Ok((q_embed, k_embed)) } } // Local attention using a small sliding window. #[derive(Debug, Clone)] struct SlidingWindowAttention { wqkv: Linear, out_proj: Linear, num_heads: usize, head_dim: usize, hidden_size: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, } impl SlidingWindowAttention { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_size = cfg.hidden_size; let num_heads = cfg.swa.num_heads; let head_dim = hidden_size / num_heads; let out_proj = linear_no_bias(hidden_size, hidden_size, vb.pp("out_proj"))?; let wqkv = linear_no_bias(hidden_size, hidden_size * 3, vb.pp("Wqkv"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb.device())?); Ok(Self { wqkv, out_proj, hidden_size, num_heads, head_dim, rotary_emb, kv_cache: None, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let qkv = xs.apply(&self.wqkv)?; let qkv = qkv.reshape((b_sz, q_len, 3, (), self.head_dim))?; let q = qkv.i((.., .., 0))?; let k = qkv.i((.., .., 1))?; let v = qkv.i((.., .., 2))?; let q = q .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let k = k .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let v = v .reshape((b_sz, q_len, self.num_heads, self.head_dim))? 
.transpose(1, 2)?; let (q, k) = self .rotary_emb .apply_rotary_emb_qkv(&q, &k, seqlen_offset)?; let (k, v) = match &self.kv_cache { None => (k, v), Some((prev_k, prev_v)) => { let k = Tensor::cat(&[prev_k, &k], 2)?; let v = Tensor::cat(&[prev_v, &v], 2)?; (k, v) } }; self.kv_cache = Some((k.clone(), v.clone())); let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (q.matmul(&k.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = softmax_last_dim(&attn_weights)?; let attn_output = attn_weights.matmul(&v)?; let out = attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.hidden_size))? .apply(&self.out_proj)?; Ok(out) } } // The model layers use three types of mixers. #[derive(Debug, Clone)] enum SequenceMixer { Based(BasedConv), Linear(LinearAttention), Sliding(SlidingWindowAttention), } impl SequenceMixer { fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, pos: usize, ) -> Result<Tensor> { match self { Self::Based(b) => b.forward(xs, pos), Self::Linear(b) => b.forward(xs, pos), Self::Sliding(b) => b.forward(xs, attention_mask, pos), } } } #[derive(Debug, Clone)] struct DecoderLayer { mlp: MLP, norm1: RmsNorm, norm2: RmsNorm, mixer: SequenceMixer, } impl DecoderLayer { fn new(layer_idx: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> { let mlp = MLP::new(cfg, vb.pp("mlp"))?; let norm1 = rms_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("norm1"))?; let norm2 = rms_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("norm2"))?; let l_attn = cfg.alt_mixer_layers.contains(&layer_idx); let sw_attn = cfg.alt_mixer_2_layers.contains(&layer_idx); let mixer = if l_attn { SequenceMixer::Linear(LinearAttention::new(cfg, vb.pp("mixer"))?) } else if sw_attn { SequenceMixer::Sliding(SlidingWindowAttention::new(cfg, vb.pp("mixer"))?) } else { SequenceMixer::Based(BasedConv::new(cfg, vb.pp("mixer"))?) 
}; Ok(Self { mlp, norm1, norm2, mixer, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.norm1.forward(xs)?; let xs = self.mixer.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.norm2)?.apply(&self.mlp)?; residual + xs } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: super::with_tracing::Embedding, layers: Vec<DecoderLayer>, norm: RmsNorm, lm_head: Linear, sliding_window: usize, device: Device, dtype: DType, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vocab_size = cfg.vocab_size + (8 - cfg.vocab_size % 8) % 8; let lm_head = linear_no_bias(cfg.hidden_size, vocab_size, vb.pp("lm_head"))?; let embed_tokens = super::with_tracing::Embedding::from_weights(lm_head.weight().clone())?; let vb_m = vb.pp("transformer"); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(layer_idx, cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = rms_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb_m.pp("ln_f"))?; Ok(Self { embed_tokens, layers, norm, lm_head, sliding_window: cfg.swa.window_size, device: vb.device().clone(), dtype: vb.dtype(), }) } fn prepare_decoder_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { let sliding_window = self.sliding_window / 2; let mask: Vec<_> = (0..tgt_len) .flat_map(|i| { (0..tgt_len).map(move |j| { if i < j || j + sliding_window < i { f32::NEG_INFINITY } else { 0. } }) }) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), self.dtype, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(self.dtype) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (b_size, seq_len) = input_ids.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?; Some(mask) }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.narrow(1, seq_len - 1, 1)? .apply(&self.norm)? .apply(&self.lm_head) } }
candle/candle-transformers/src/models/based.rs/0
{ "file_path": "candle/candle-transformers/src/models/based.rs", "repo_id": "candle", "token_count": 9918 }
43
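The expanded_size = feature_dim^2 + feature_dim + 1 used for k_state and kv_state above comes straight from the feature map returned by taylor_expansion: it approximates exp(q.k) with a second-order Taylor series, mapping each feature_dim-sized head vector x to the concatenation [1, x / d^(1/4), vec(x x^T) / (sqrt(2) * sqrt(d))]. A dependency-free sketch of the same expansion follows; taylor_feature_map is a hypothetical helper written for illustration only.

/// Second-order Taylor feature map: phi(x) = [1, x/d^(1/4), vec(x x^T)/(sqrt(2)*sqrt(d))],
/// so a d-dimensional head vector expands to 1 + d + d*d features
/// (the `expanded_size = feature_dim^2 + feature_dim + 1` used for the recurrent state above).
fn taylor_feature_map(x: &[f32]) -> Vec<f32> {
    let d = x.len() as f32;
    let (rd, rrd, r2) = (d.sqrt(), d.sqrt().sqrt(), std::f32::consts::SQRT_2);
    let mut out = Vec::with_capacity(1 + x.len() + x.len() * x.len());
    out.push(1.0);
    out.extend(x.iter().map(|v| v / rrd));
    // Outer product term, flattened row-major as in the tensor version.
    for &a in x {
        for &b in x {
            out.push(a * b / r2 / rd);
        }
    }
    out
}

fn main() {
    let phi = taylor_feature_map(&[0.5, -1.0, 2.0, 0.25]); // feature_dim = 4
    assert_eq!(phi.len(), 1 + 4 + 16); // matches expanded_size = 21
}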
use super::with_tracing::{linear, linear_no_bias, Embedding, Linear}; use candle::{DType, Device, IndexOp, Result, Tensor, D}; use candle_nn::{layer_norm, LayerNorm, Module, VarBuilder}; use serde::Deserialize; pub const DTYPE: DType = DType::F32; #[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)] #[serde(rename_all = "lowercase")] pub enum PositionEmbeddingType { Absolute, Alibi, } // https://huggingface.co/jinaai/jina-bert-implementation/blob/main/configuration_bert.py #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { pub vocab_size: usize, pub hidden_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub intermediate_size: usize, pub hidden_act: candle_nn::Activation, pub max_position_embeddings: usize, pub type_vocab_size: usize, pub initializer_range: f64, pub layer_norm_eps: f64, pub pad_token_id: usize, pub position_embedding_type: PositionEmbeddingType, } impl Config { pub fn v2_base() -> Self { // https://huggingface.co/jinaai/jina-embeddings-v2-base-en/blob/main/config.json Self { vocab_size: 30528, hidden_size: 768, num_hidden_layers: 12, num_attention_heads: 12, intermediate_size: 3072, hidden_act: candle_nn::Activation::Gelu, max_position_embeddings: 8192, type_vocab_size: 2, initializer_range: 0.02, layer_norm_eps: 1e-12, pad_token_id: 0, position_embedding_type: PositionEmbeddingType::Alibi, } } #[allow(clippy::too_many_arguments)] pub fn new( vocab_size: usize, hidden_size: usize, num_hidden_layers: usize, num_attention_heads: usize, intermediate_size: usize, hidden_act: candle_nn::Activation, max_position_embeddings: usize, type_vocab_size: usize, initializer_range: f64, layer_norm_eps: f64, pad_token_id: usize, position_embedding_type: PositionEmbeddingType, ) -> Self { Config { vocab_size, hidden_size, num_hidden_layers, num_attention_heads, intermediate_size, hidden_act, max_position_embeddings, type_vocab_size, initializer_range, layer_norm_eps, pad_token_id, position_embedding_type, } } } #[derive(Clone, Debug)] struct BertEmbeddings { word_embeddings: Embedding, // no position_embeddings as we only support alibi. token_type_embeddings: Embedding, layer_norm: LayerNorm, span: tracing::Span, } impl BertEmbeddings { fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> { let word_embeddings = Embedding::new(cfg.vocab_size, cfg.hidden_size, vb.pp("word_embeddings"))?; let token_type_embeddings = Embedding::new( cfg.type_vocab_size, cfg.hidden_size, vb.pp("token_type_embeddings"), )?; let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?; Ok(Self { word_embeddings, token_type_embeddings, layer_norm, span: tracing::span!(tracing::Level::TRACE, "embeddings"), }) } } impl Module for BertEmbeddings { fn forward(&self, input_ids: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (b_size, seq_len) = input_ids.dims2()?; let input_embeddings = self.word_embeddings.forward(input_ids)?; let token_type_embeddings = Tensor::zeros(seq_len, DType::U32, input_ids.device())? .broadcast_left(b_size)? 
.apply(&self.token_type_embeddings)?; let embeddings = (&input_embeddings + token_type_embeddings)?; let embeddings = self.layer_norm.forward(&embeddings)?; Ok(embeddings) } } #[derive(Clone, Debug)] struct BertSelfAttention { query: Linear, key: Linear, value: Linear, num_attention_heads: usize, attention_head_size: usize, span: tracing::Span, span_softmax: tracing::Span, } impl BertSelfAttention { fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> { let attention_head_size = cfg.hidden_size / cfg.num_attention_heads; let all_head_size = cfg.num_attention_heads * attention_head_size; let hidden_size = cfg.hidden_size; let query = linear(hidden_size, all_head_size, vb.pp("query"))?; let value = linear(hidden_size, all_head_size, vb.pp("value"))?; let key = linear(hidden_size, all_head_size, vb.pp("key"))?; Ok(Self { query, key, value, num_attention_heads: cfg.num_attention_heads, attention_head_size, span: tracing::span!(tracing::Level::TRACE, "self-attn"), span_softmax: tracing::span!(tracing::Level::TRACE, "softmax"), }) } fn transpose_for_scores(&self, xs: &Tensor) -> Result<Tensor> { let mut x_shape = xs.dims().to_vec(); x_shape.pop(); x_shape.push(self.num_attention_heads); x_shape.push(self.attention_head_size); xs.reshape(x_shape)?.transpose(1, 2)?.contiguous() } fn forward(&self, xs: &Tensor, bias: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let query_layer = self.query.forward(xs)?; let key_layer = self.key.forward(xs)?; let value_layer = self.value.forward(xs)?; let query_layer = self.transpose_for_scores(&query_layer)?; let key_layer = self.transpose_for_scores(&key_layer)?; let value_layer = self.transpose_for_scores(&value_layer)?; let attention_scores = query_layer.matmul(&key_layer.t()?)?; let attention_scores = (attention_scores / (self.attention_head_size as f64).sqrt())?; let attention_scores = attention_scores.broadcast_add(bias)?; let attention_probs = { let _enter_sm = self.span_softmax.enter(); candle_nn::ops::softmax_last_dim(&attention_scores)? }; let context_layer = attention_probs.matmul(&value_layer)?; let context_layer = context_layer.transpose(1, 2)?.contiguous()?; let context_layer = context_layer.flatten_from(D::Minus2)?; Ok(context_layer) } } #[derive(Clone, Debug)] struct BertSelfOutput { dense: Linear, layer_norm: LayerNorm, span: tracing::Span, } impl BertSelfOutput { fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> { let dense = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?; let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?; Ok(Self { dense, layer_norm, span: tracing::span!(tracing::Level::TRACE, "self-out"), }) } fn forward(&self, xs: &Tensor, input_tensor: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let xs = self.dense.forward(xs)?; self.layer_norm.forward(&(xs + input_tensor)?) 
} } #[derive(Clone, Debug)] struct BertAttention { self_attention: BertSelfAttention, self_output: BertSelfOutput, span: tracing::Span, } impl BertAttention { fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> { let self_attention = BertSelfAttention::new(vb.pp("self"), cfg)?; let self_output = BertSelfOutput::new(vb.pp("output"), cfg)?; Ok(Self { self_attention, self_output, span: tracing::span!(tracing::Level::TRACE, "attn"), }) } fn forward(&self, xs: &Tensor, bias: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let self_outputs = self.self_attention.forward(xs, bias)?; let attention_output = self.self_output.forward(&self_outputs, xs)?; Ok(attention_output) } } #[derive(Clone, Debug)] struct BertGLUMLP { gated_layers: Linear, act: candle_nn::Activation, wo: Linear, layernorm: LayerNorm, intermediate_size: usize, } impl BertGLUMLP { fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> { let gated_layers = linear_no_bias( cfg.hidden_size, cfg.intermediate_size * 2, vb.pp("gated_layers"), )?; let act = candle_nn::Activation::Gelu; // geglu let wo = linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("wo"))?; let layernorm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("layernorm"))?; Ok(Self { gated_layers, act, wo, layernorm, intermediate_size: cfg.intermediate_size, }) } } impl Module for BertGLUMLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let residual = xs; let xs = xs.apply(&self.gated_layers)?; let gated = xs.narrow(D::Minus1, 0, self.intermediate_size)?; let non_gated = xs.narrow(D::Minus1, self.intermediate_size, self.intermediate_size)?; let xs = (gated.apply(&self.act) * non_gated)?.apply(&self.wo); (xs + residual)?.apply(&self.layernorm) } } #[derive(Clone, Debug)] struct BertLayer { attention: BertAttention, mlp: BertGLUMLP, span: tracing::Span, } impl BertLayer { fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> { let attention = BertAttention::new(vb.pp("attention"), cfg)?; let mlp = BertGLUMLP::new(vb.pp("mlp"), cfg)?; Ok(Self { attention, mlp, span: tracing::span!(tracing::Level::TRACE, "layer"), }) } fn forward(&self, xs: &Tensor, bias: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.attention.forward(xs, bias)?.apply(&self.mlp) } } fn build_alibi_bias(cfg: &Config) -> Result<Tensor> { let n_heads = cfg.num_attention_heads; let seq_len = cfg.max_position_embeddings; let alibi_bias = Tensor::arange(0, seq_len as i64, &Device::Cpu)?.to_dtype(DType::F32)?; let alibi_bias = { let a1 = alibi_bias.reshape((1, seq_len))?; let a2 = alibi_bias.reshape((seq_len, 1))?; a1.broadcast_sub(&a2)?.abs()?.broadcast_left(n_heads)? 
}; let mut n_heads2 = 1; while n_heads2 < n_heads { n_heads2 *= 2 } let slopes = (1..=n_heads2) .map(|v| -1f32 / 2f32.powf((v * 8) as f32 / n_heads2 as f32)) .collect::<Vec<_>>(); let slopes = if n_heads2 == n_heads { slopes } else { slopes .iter() .skip(1) .step_by(2) .chain(slopes.iter().step_by(2)) .take(n_heads) .cloned() .collect::<Vec<f32>>() }; let slopes = Tensor::new(slopes, &Device::Cpu)?.reshape((1, (), 1, 1))?; alibi_bias.to_dtype(DType::F32)?.broadcast_mul(&slopes) } #[derive(Clone, Debug)] struct BertEncoder { alibi: Tensor, layers: Vec<BertLayer>, span: tracing::Span, } impl BertEncoder { fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> { if cfg.position_embedding_type != PositionEmbeddingType::Alibi { candle::bail!("only alibi is supported as a position-embedding-type") } let layers = (0..cfg.num_hidden_layers) .map(|index| BertLayer::new(vb.pp(&format!("layer.{index}")), cfg)) .collect::<Result<Vec<_>>>()?; let span = tracing::span!(tracing::Level::TRACE, "encoder"); let alibi = build_alibi_bias(cfg)?.to_device(vb.device())?; Ok(Self { alibi, layers, span, }) } } impl Module for BertEncoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let seq_len = xs.dim(1)?; let alibi_bias = self.alibi.i((.., .., ..seq_len, ..seq_len))?; let mut xs = xs.clone(); for layer in self.layers.iter() { xs = layer.forward(&xs, &alibi_bias)? } Ok(xs) } } #[derive(Clone, Debug)] pub struct BertModel { embeddings: BertEmbeddings, encoder: BertEncoder, pub device: Device, span: tracing::Span, } impl BertModel { pub fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> { let embeddings = BertEmbeddings::new(vb.pp("embeddings"), cfg)?; let encoder = BertEncoder::new(vb.pp("encoder"), cfg)?; Ok(Self { embeddings, encoder, device: vb.device().clone(), span: tracing::span!(tracing::Level::TRACE, "model"), }) } } impl Module for BertModel { fn forward(&self, input_ids: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let embedding_output = self.embeddings.forward(input_ids)?; let sequence_output = self.encoder.forward(&embedding_output)?; Ok(sequence_output) } }
candle/candle-transformers/src/models/jina_bert.rs/0
{ "file_path": "candle/candle-transformers/src/models/jina_bert.rs", "repo_id": "candle", "token_count": 6287 }
44
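The slope schedule inside build_alibi_bias works as follows: take the smallest power of two n2 >= num_attention_heads, use slope_v = -1 / 2^(8v / n2) for v = 1..=n2, and, when the head count is not itself a power of two, interleave odd and even entries and keep num_attention_heads of them. Each head h then adds slope_h * |i - j| to its attention scores, so attention decays with distance. Below is a standalone sketch that mirrors the code above; alibi_slopes is a hypothetical helper for illustration.

/// Slopes used by build_alibi_bias above.
fn alibi_slopes(n_heads: usize) -> Vec<f32> {
    let mut n2 = 1usize;
    while n2 < n_heads {
        n2 *= 2;
    }
    let slopes: Vec<f32> = (1..=n2)
        .map(|v| -1f32 / 2f32.powf((v * 8) as f32 / n2 as f32))
        .collect();
    if n2 == n_heads {
        slopes
    } else {
        // Head count is not a power of two: interleave odd/even entries, keep n_heads.
        slopes
            .iter()
            .skip(1)
            .step_by(2)
            .chain(slopes.iter().step_by(2))
            .take(n_heads)
            .cloned()
            .collect()
    }
}

fn main() {
    // The 12-head jina-embeddings-v2-base config yields 12 distinct negative slopes.
    let s = alibi_slopes(12);
    assert_eq!(s.len(), 12);
    assert!(s.iter().all(|&v| v < 0.0));
}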
// Implement the MMDiT model originally introduced for Stable Diffusion 3 (https://arxiv.org/abs/2403.03206). // This follows the implementation of the MMDiT model in the ComfyUI repository. // https://github.com/comfyanonymous/ComfyUI/blob/78e133d0415784924cd2674e2ee48f3eeca8a2aa/comfy/ldm/modules/diffusionmodules/mmdit.py#L1 use candle::{Module, Result, Tensor, D}; use candle_nn as nn; use super::blocks::{ContextQkvOnlyJointBlock, FinalLayer, JointBlock}; use super::embedding::{ PatchEmbedder, PositionEmbedder, TimestepEmbedder, Unpatchifier, VectorEmbedder, }; #[derive(Debug, Clone)] pub struct Config { pub patch_size: usize, pub in_channels: usize, pub out_channels: usize, pub depth: usize, pub head_size: usize, pub adm_in_channels: usize, pub pos_embed_max_size: usize, pub context_embed_size: usize, pub frequency_embedding_size: usize, } impl Config { pub fn sd3() -> Self { Self { patch_size: 2, in_channels: 16, out_channels: 16, depth: 24, head_size: 64, adm_in_channels: 2048, pos_embed_max_size: 192, context_embed_size: 4096, frequency_embedding_size: 256, } } } pub struct MMDiT { core: MMDiTCore, patch_embedder: PatchEmbedder, pos_embedder: PositionEmbedder, timestep_embedder: TimestepEmbedder, vector_embedder: VectorEmbedder, context_embedder: nn::Linear, unpatchifier: Unpatchifier, } impl MMDiT { pub fn new(cfg: &Config, vb: nn::VarBuilder) -> Result<Self> { let hidden_size = cfg.head_size * cfg.depth; let core = MMDiTCore::new( cfg.depth, hidden_size, cfg.depth, cfg.patch_size, cfg.out_channels, vb.clone(), )?; let patch_embedder = PatchEmbedder::new( cfg.patch_size, cfg.in_channels, hidden_size, vb.pp("x_embedder"), )?; let pos_embedder = PositionEmbedder::new( hidden_size, cfg.patch_size, cfg.pos_embed_max_size, vb.clone(), )?; let timestep_embedder = TimestepEmbedder::new( hidden_size, cfg.frequency_embedding_size, vb.pp("t_embedder"), )?; let vector_embedder = VectorEmbedder::new(cfg.adm_in_channels, hidden_size, vb.pp("y_embedder"))?; let context_embedder = nn::linear( cfg.context_embed_size, hidden_size, vb.pp("context_embedder"), )?; let unpatchifier = Unpatchifier::new(cfg.patch_size, cfg.out_channels)?; Ok(Self { core, patch_embedder, pos_embedder, timestep_embedder, vector_embedder, context_embedder, unpatchifier, }) } pub fn forward(&self, x: &Tensor, t: &Tensor, y: &Tensor, context: &Tensor) -> Result<Tensor> { // Following the convention of the ComfyUI implementation. // https://github.com/comfyanonymous/ComfyUI/blob/78e133d0415784924cd2674e2ee48f3eeca8a2aa/comfy/ldm/modules/diffusionmodules/mmdit.py#L919 // // Forward pass of DiT. // x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images) // t: (N,) tensor of diffusion timesteps // y: (N,) tensor of class labels let h = x.dim(D::Minus2)?; let w = x.dim(D::Minus1)?; let cropped_pos_embed = self.pos_embedder.get_cropped_pos_embed(h, w)?; let x = self .patch_embedder .forward(x)? 
.broadcast_add(&cropped_pos_embed)?; let c = self.timestep_embedder.forward(t)?; let y = self.vector_embedder.forward(y)?; let c = (c + y)?; let context = self.context_embedder.forward(context)?; let x = self.core.forward(&context, &x, &c)?; let x = self.unpatchifier.unpatchify(&x, h, w)?; x.narrow(2, 0, h)?.narrow(3, 0, w) } } pub struct MMDiTCore { joint_blocks: Vec<JointBlock>, context_qkv_only_joint_block: ContextQkvOnlyJointBlock, final_layer: FinalLayer, } impl MMDiTCore { pub fn new( depth: usize, hidden_size: usize, num_heads: usize, patch_size: usize, out_channels: usize, vb: nn::VarBuilder, ) -> Result<Self> { let mut joint_blocks = Vec::with_capacity(depth - 1); for i in 0..depth - 1 { joint_blocks.push(JointBlock::new( hidden_size, num_heads, vb.pp(format!("joint_blocks.{}", i)), )?); } Ok(Self { joint_blocks, context_qkv_only_joint_block: ContextQkvOnlyJointBlock::new( hidden_size, num_heads, vb.pp(format!("joint_blocks.{}", depth - 1)), )?, final_layer: FinalLayer::new( hidden_size, patch_size, out_channels, vb.pp("final_layer"), )?, }) } pub fn forward(&self, context: &Tensor, x: &Tensor, c: &Tensor) -> Result<Tensor> { let (mut context, mut x) = (context.clone(), x.clone()); for joint_block in &self.joint_blocks { (context, x) = joint_block.forward(&context, &x, c)?; } let x = self.context_qkv_only_joint_block.forward(&context, &x, c)?; self.final_layer.forward(&x, c) } }
candle/candle-transformers/src/models/mmdit/model.rs/0
{ "file_path": "candle/candle-transformers/src/models/mmdit/model.rs", "repo_id": "candle", "token_count": 2882 }
45
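A few dimensions implicit in Config::sd3() are worth spelling out: the transformer width is head_size * depth = 64 * 24 = 1536 (MMDiTCore is built with depth = 24 attention heads of size 64), a latent of height H and width W becomes (H/2) * (W/2) patch tokens with patch_size = 2, and the cropped position embeddings support up to pos_embed_max_size = 192 patches per side. The sketch below only records that arithmetic; the 8x VAE downsampling factor mentioned in the comment is an assumption about the surrounding SD3 pipeline, not something defined in this file.

/// Shape bookkeeping implied by Config::sd3(): width = head_size * depth,
/// token count = (H / patch_size) * (W / patch_size).
fn sd3_dims(h: usize, w: usize) -> (usize, usize) {
    let (patch_size, head_size, depth) = (2usize, 64usize, 24usize);
    let hidden_size = head_size * depth; // 1536, split across `depth` (24) attention heads
    let tokens = (h / patch_size) * (w / patch_size);
    (hidden_size, tokens)
}

fn main() {
    // Assuming the usual 8x VAE downsampling, a 1024x1024 image maps to a 128x128 latent,
    // which MMDiT sees as 64 * 64 = 4096 patch tokens of width 1536.
    assert_eq!(sd3_dims(128, 128), (1536, 4096));
}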
use crate::models::with_tracing::{linear, linear_no_bias, Linear, RmsNorm}; use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use candle_nn::{Activation, VarBuilder}; use std::sync::Arc; #[derive(Debug, Clone, PartialEq, serde::Deserialize)] pub struct Config { pub vocab_size: usize, pub hidden_size: usize, pub intermediate_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub num_key_value_heads: usize, pub max_position_embeddings: usize, pub sliding_window: usize, pub max_window_layers: usize, pub tie_word_embeddings: bool, pub rope_theta: f64, pub rms_norm_eps: f64, pub use_sliding_window: bool, pub hidden_act: Activation, } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } impl RotaryEmbedding { fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> { let dim = cfg.hidden_size / cfg.num_attention_heads; let max_seq_len = cfg.max_position_embeddings; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(dtype)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?; let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { gate_proj: Linear, up_proj: Linear, down_proj: Linear, act_fn: Activation, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let intermediate_sz = cfg.intermediate_size; let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?; let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?; let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?; Ok(Self { gate_proj, up_proj, down_proj, act_fn: cfg.hidden_act, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?; let rhs = xs.apply(&self.up_proj)?; (lhs * rhs)?.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, hidden_size: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let num_kv_groups = num_heads / num_kv_heads; let head_dim = hidden_sz / num_heads; let q_proj = linear(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?; let k_proj = linear(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?; let v_proj = linear(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?; let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, 
o_proj, num_heads, num_kv_heads, num_kv_groups, head_dim, hidden_size: hidden_sz, rotary_emb, kv_cache: None, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let (query_states, key_states) = self.rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?; let attn_output = { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.hidden_size))? 
.apply(&self.o_proj) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: MLP, input_layernorm: RmsNorm, post_attention_layernorm: RmsNorm, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; let input_layernorm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, mlp, input_layernorm, post_attention_layernorm, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?; residual + xs } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: candle_nn::Embedding, layers: Vec<DecoderLayer>, norm: RmsNorm, sliding_window: usize, device: Device, dtype: DType, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?; Ok(Self { embed_tokens, layers, norm, sliding_window: cfg.sliding_window, device: vb.device().clone(), dtype: vb.dtype(), }) } fn prepare_causal_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { // Sliding window mask? let mask: Vec<_> = (0..tgt_len) .flat_map(|i| { (0..tgt_len).map(move |j| { if i < j || j + self.sliding_window < i { f32::NEG_INFINITY } else { 0. } }) }) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), self.dtype, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(self.dtype) } fn prepare_attention_mask(&self, attn_mask: &Tensor) -> Result<Tensor> { let (b_sz, sql_len) = attn_mask.dims2()?; let mut mask: Vec<Tensor> = vec![]; for b in 0..b_sz { mask.push(attn_mask.i((b, ..))?.expand((1, 1, sql_len, sql_len))?); } let mask = Tensor::cat(&mask, 0)?; let on_true = mask.zeros_like()?.to_dtype(self.dtype)?; let on_false = Tensor::new(f32::NEG_INFINITY, &self.device)? .broadcast_as(mask.shape())? 
.to_dtype(self.dtype)?; mask.where_cond(&on_true, &on_false) } pub fn forward( &mut self, input_ids: &Tensor, seqlen_offset: usize, attn_mask: Option<&Tensor>, ) -> Result<Tensor> { let (b_size, seq_len) = input_ids.dims2()?; let attention_mask: Option<Tensor> = match attn_mask { Some(mask) => Some(self.prepare_attention_mask(mask)?), None => { if seq_len <= 1 { None } else { Some(self.prepare_causal_attention_mask(b_size, seq_len, seqlen_offset)?) } } }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.apply(&self.norm) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } } #[derive(Debug, Clone)] pub struct ModelForCausalLM { base_model: Model, lm_head: Linear, } impl ModelForCausalLM { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let base_model = Model::new(cfg, vb.clone())?; let lm_head = if vb.contains_tensor("lm_head.weight") { linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))? } else { Linear::from_weights(base_model.embed_tokens.embeddings().clone(), None) }; Ok(Self { base_model, lm_head, }) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (_b_size, seq_len) = input_ids.dims2()?; self.base_model .forward(input_ids, seqlen_offset, None)? .narrow(1, seq_len - 1, 1)? .apply(&self.lm_head) } pub fn clear_kv_cache(&mut self) { self.base_model.clear_kv_cache() } }
candle/candle-transformers/src/models/qwen2.rs/0
{ "file_path": "candle/candle-transformers/src/models/qwen2.rs", "repo_id": "candle", "token_count": 6708 }
46
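The qwen2.rs file above exposes `ModelForCausalLM::new` plus an incremental `forward(input_ids, seqlen_offset)` that returns logits only for the last position, so a decode loop feeds the whole prompt once and then one token at a time while the per-layer KV cache grows. Below is a minimal greedy-decoding sketch under stated assumptions: the `config.json` / `model.safetensors` file names, the argmax sampling, and the omitted tokenizer are placeholders for illustration, not part of the file.

use candle::{DType, Device, Tensor, D};
use candle_nn::VarBuilder;
use candle_transformers::models::qwen2::{Config, ModelForCausalLM};

fn generate(prompt: &[u32], max_new_tokens: usize) -> candle::Result<Vec<u32>> {
    let device = Device::Cpu;
    // Assumed file locations; a real loader would take these as arguments.
    let cfg: Config = serde_json::from_slice(&std::fs::read("config.json").unwrap()).unwrap();
    let vb = unsafe {
        VarBuilder::from_mmaped_safetensors(&["model.safetensors"], DType::F32, &device)?
    };
    let mut model = ModelForCausalLM::new(&cfg, vb)?;

    let mut tokens = prompt.to_vec();
    let mut offset = 0;
    for _ in 0..max_new_tokens {
        // First step feeds the full prompt; later steps feed only the newest token,
        // since earlier keys/values are kept in each layer's kv_cache.
        let ctx = &tokens[offset..];
        let input = Tensor::new(ctx, &device)?.unsqueeze(0)?; // (1, ctx_len)
        let logits = model.forward(&input, offset)?; // (1, 1, vocab_size)
        // Greedy sampling for simplicity; a real pipeline would use a LogitsProcessor.
        let next = logits.squeeze(0)?.squeeze(0)?.argmax(D::Minus1)?.to_scalar::<u32>()?;
        offset = tokens.len();
        tokens.push(next);
    }
    model.clear_kv_cache();
    Ok(tokens)
}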
//! Contrastive Language-Image Pre-Training //! //! Contrastive Language-Image Pre-Training (CLIP) is an architecture trained on //! pairs of images with related texts. //! //! https://github.com/openai/CLIP use candle::{DType, Device, Result, Tensor, D}; use candle_nn as nn; use candle_nn::Module; #[derive(Debug, Clone, Copy)] pub enum Activation { QuickGelu, Gelu, GeluErf, } impl Module for Activation { fn forward(&self, xs: &Tensor) -> Result<Tensor> { match self { Activation::QuickGelu => xs * nn::ops::sigmoid(&(xs * 1.702f64)?)?, Activation::Gelu => xs.gelu(), Activation::GeluErf => xs.gelu_erf(), } } } #[derive(Debug, Clone)] pub struct Config { vocab_size: usize, embed_dim: usize, // aka config.hidden_size activation: Activation, // aka config.hidden_act intermediate_size: usize, pub max_position_embeddings: usize, // The character to use for padding, use EOS when not set. pub pad_with: Option<String>, num_hidden_layers: usize, num_attention_heads: usize, #[allow(dead_code)] projection_dim: usize, } impl Config { // The config details can be found in the "text_config" section of this json file: // https://huggingface.co/openai/clip-vit-large-patch14/blob/main/config.json pub fn v1_5() -> Self { Self { vocab_size: 49408, embed_dim: 768, intermediate_size: 3072, max_position_embeddings: 77, pad_with: None, num_hidden_layers: 12, num_attention_heads: 12, projection_dim: 768, activation: Activation::QuickGelu, } } // https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/text_encoder/config.json pub fn v2_1() -> Self { Self { vocab_size: 49408, embed_dim: 1024, intermediate_size: 4096, max_position_embeddings: 77, pad_with: Some("!".to_string()), num_hidden_layers: 23, num_attention_heads: 16, projection_dim: 512, activation: Activation::Gelu, } } // https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/text_encoder/config.json pub fn sdxl() -> Self { Self { vocab_size: 49408, embed_dim: 768, intermediate_size: 3072, max_position_embeddings: 77, pad_with: Some("!".to_string()), num_hidden_layers: 12, num_attention_heads: 12, projection_dim: 768, activation: Activation::QuickGelu, } } // https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/text_encoder_2/config.json pub fn sdxl2() -> Self { Self { vocab_size: 49408, embed_dim: 1280, intermediate_size: 5120, max_position_embeddings: 77, pad_with: Some("!".to_string()), num_hidden_layers: 32, num_attention_heads: 20, projection_dim: 1280, activation: Activation::Gelu, } } pub fn ssd1b() -> Self { Self::sdxl() } pub fn ssd1b2() -> Self { Self::sdxl2() } // https://huggingface.co/warp-ai/wuerstchen/blob/main/text_encoder/config.json pub fn wuerstchen() -> Self { Self { vocab_size: 49408, embed_dim: 1024, intermediate_size: 4096, max_position_embeddings: 77, pad_with: None, num_hidden_layers: 24, num_attention_heads: 16, projection_dim: 1024, activation: Activation::GeluErf, } } // https://huggingface.co/warp-ai/wuerstchen-prior/blob/main/text_encoder/config.json pub fn wuerstchen_prior() -> Self { Self { vocab_size: 49408, embed_dim: 1280, intermediate_size: 5120, max_position_embeddings: 77, pad_with: None, num_hidden_layers: 32, num_attention_heads: 20, projection_dim: 512, activation: Activation::GeluErf, } } } // CLIP Text Model // https://github.com/huggingface/transformers/blob/674f750a57431222fa2832503a108df3badf1564/src/transformers/models/clip/modeling_clip.py #[derive(Debug)] struct ClipTextEmbeddings { token_embedding: candle_nn::Embedding, position_embedding: 
candle_nn::Embedding, position_ids: Tensor, } impl ClipTextEmbeddings { fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> { let token_embedding = candle_nn::embedding(c.vocab_size, c.embed_dim, vs.pp("token_embedding"))?; let position_embedding = candle_nn::embedding( c.max_position_embeddings, c.embed_dim, vs.pp("position_embedding"), )?; let position_ids = Tensor::arange(0u32, c.max_position_embeddings as u32, vs.device())?.unsqueeze(0)?; Ok(ClipTextEmbeddings { token_embedding, position_embedding, position_ids, }) } } impl Module for ClipTextEmbeddings { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let token_embedding = self.token_embedding.forward(xs)?; let position_embedding = self.position_embedding.forward(&self.position_ids)?; token_embedding.broadcast_add(&position_embedding) } } #[derive(Debug)] struct ClipAttention { k_proj: candle_nn::Linear, v_proj: candle_nn::Linear, q_proj: candle_nn::Linear, out_proj: candle_nn::Linear, head_dim: usize, scale: f64, num_attention_heads: usize, } impl ClipAttention { fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> { let embed_dim = c.embed_dim; let num_attention_heads = c.num_attention_heads; let k_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("k_proj"))?; let v_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("v_proj"))?; let q_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("q_proj"))?; let out_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("out_proj"))?; let head_dim = embed_dim / num_attention_heads; let scale = (head_dim as f64).powf(-0.5); Ok(ClipAttention { k_proj, v_proj, q_proj, out_proj, head_dim, scale, num_attention_heads, }) } fn shape(&self, xs: &Tensor, seq_len: usize, bsz: usize) -> Result<Tensor> { xs.reshape((bsz, seq_len, self.num_attention_heads, self.head_dim))? .transpose(1, 2)? .contiguous() } fn forward(&self, xs: &Tensor, causal_attention_mask: &Tensor) -> Result<Tensor> { let in_dtype = xs.dtype(); let (bsz, seq_len, embed_dim) = xs.dims3()?; let query_states = (self.q_proj.forward(xs)? * self.scale)?; let proj_shape = (bsz * self.num_attention_heads, seq_len, self.head_dim); let query_states = self .shape(&query_states, seq_len, bsz)? .reshape(proj_shape)? .to_dtype(DType::F32)?; let key_states = self .shape(&self.k_proj.forward(xs)?, seq_len, bsz)? .reshape(proj_shape)? .to_dtype(DType::F32)?; let value_states = self .shape(&self.v_proj.forward(xs)?, seq_len, bsz)? .reshape(proj_shape)? .to_dtype(DType::F32)?; let attn_weights = query_states.matmul(&key_states.transpose(1, 2)?)?; let src_len = key_states.dim(1)?; let attn_weights = attn_weights .reshape((bsz, self.num_attention_heads, seq_len, src_len))? .broadcast_add(causal_attention_mask)?; let attn_weights = attn_weights.reshape((bsz * self.num_attention_heads, seq_len, src_len))?; let attn_weights = candle_nn::ops::softmax(&attn_weights, D::Minus1)?; let attn_output = attn_weights.matmul(&value_states)?.to_dtype(in_dtype)?; let attn_output = attn_output .reshape((bsz, self.num_attention_heads, seq_len, self.head_dim))? .transpose(1, 2)? 
.reshape((bsz, seq_len, embed_dim))?; self.out_proj.forward(&attn_output) } } #[derive(Debug)] struct ClipMlp { fc1: candle_nn::Linear, fc2: candle_nn::Linear, activation: Activation, } impl ClipMlp { fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> { let fc1 = candle_nn::linear(c.embed_dim, c.intermediate_size, vs.pp("fc1"))?; let fc2 = candle_nn::linear(c.intermediate_size, c.embed_dim, vs.pp("fc2"))?; Ok(ClipMlp { fc1, fc2, activation: c.activation, }) } } impl ClipMlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.fc1.forward(xs)?; self.fc2.forward(&self.activation.forward(&xs)?) } } #[derive(Debug)] struct ClipEncoderLayer { self_attn: ClipAttention, layer_norm1: candle_nn::LayerNorm, mlp: ClipMlp, layer_norm2: candle_nn::LayerNorm, } impl ClipEncoderLayer { fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> { let self_attn = ClipAttention::new(vs.pp("self_attn"), c)?; let layer_norm1 = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("layer_norm1"))?; let mlp = ClipMlp::new(vs.pp("mlp"), c)?; let layer_norm2 = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("layer_norm2"))?; Ok(ClipEncoderLayer { self_attn, layer_norm1, mlp, layer_norm2, }) } fn forward(&self, xs: &Tensor, causal_attention_mask: &Tensor) -> Result<Tensor> { let residual = xs; let xs = self.layer_norm1.forward(xs)?; let xs = self.self_attn.forward(&xs, causal_attention_mask)?; let xs = (xs + residual)?; let residual = &xs; let xs = self.layer_norm2.forward(&xs)?; let xs = self.mlp.forward(&xs)?; xs + residual } } #[derive(Debug)] struct ClipEncoder { layers: Vec<ClipEncoderLayer>, } impl ClipEncoder { fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> { let vs = vs.pp("layers"); let mut layers: Vec<ClipEncoderLayer> = Vec::new(); for index in 0..c.num_hidden_layers { let layer = ClipEncoderLayer::new(vs.pp(index.to_string()), c)?; layers.push(layer) } Ok(ClipEncoder { layers }) } fn forward(&self, xs: &Tensor, causal_attention_mask: &Tensor) -> Result<Tensor> { let mut xs = xs.clone(); for layer in self.layers.iter() { xs = layer.forward(&xs, causal_attention_mask)?; } Ok(xs) } } /// A CLIP transformer based model. #[derive(Debug)] pub struct ClipTextTransformer { embeddings: ClipTextEmbeddings, encoder: ClipEncoder, final_layer_norm: candle_nn::LayerNorm, } impl ClipTextTransformer { pub fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> { let vs = vs.pp("text_model"); let embeddings = ClipTextEmbeddings::new(vs.pp("embeddings"), c)?; let encoder = ClipEncoder::new(vs.pp("encoder"), c)?; let final_layer_norm = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("final_layer_norm"))?; Ok(ClipTextTransformer { embeddings, encoder, final_layer_norm, }) } // https://github.com/huggingface/transformers/blob/674f750a57431222fa2832503a108df3badf1564/src/transformers/models/clip/modeling_clip.py#L678 fn build_causal_attention_mask( bsz: usize, seq_len: usize, mask_after: usize, device: &Device, ) -> Result<Tensor> { let mask: Vec<_> = (0..seq_len) .flat_map(|i| { (0..seq_len).map(move |j| { if j > i || j > mask_after { f32::MIN } else { 0. 
} }) }) .collect(); let mask = Tensor::from_slice(&mask, (seq_len, seq_len), device)?; mask.broadcast_as((bsz, seq_len, seq_len)) } pub fn forward_with_mask(&self, xs: &Tensor, mask_after: usize) -> Result<Tensor> { let (bsz, seq_len) = xs.dims2()?; let xs = self.embeddings.forward(xs)?; let causal_attention_mask = Self::build_causal_attention_mask(bsz, seq_len, mask_after, xs.device())?; let xs = self.encoder.forward(&xs, &causal_attention_mask)?; self.final_layer_norm.forward(&xs) } } impl Module for ClipTextTransformer { fn forward(&self, xs: &Tensor) -> Result<Tensor> { self.forward_with_mask(xs, usize::MAX) } }
candle/candle-transformers/src/models/stable_diffusion/clip.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/clip.rs", "repo_id": "candle", "token_count": 6474 }
47
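The CLIP text transformer above is the prompt encoder used by the stable-diffusion pipelines: token ids padded to `max_position_embeddings` go in, per-token hidden states come out, with the causal mask applied internally by the `Module` impl. A small encoding sketch follows; the weight file name, the hard-coded EOS padding id (49407), and the absence of a real BPE tokenizer are assumptions made only for illustration.

use candle::{DType, Device, Tensor};
use candle_nn::{Module, VarBuilder};
use candle_transformers::models::stable_diffusion::clip::{ClipTextTransformer, Config};

fn encode_prompt(token_ids: &[u32]) -> candle::Result<Tensor> {
    let device = Device::Cpu;
    let cfg = Config::v1_5();
    let vb = unsafe {
        VarBuilder::from_mmaped_safetensors(&["text_encoder.safetensors"], DType::F32, &device)?
    };
    let model = ClipTextTransformer::new(vb, &cfg)?;

    // Pad (or truncate) to the fixed context length expected by the position embeddings.
    let mut ids = token_ids.to_vec();
    ids.resize(cfg.max_position_embeddings, 49407); // assumed EOS/pad id
    let ids = Tensor::new(ids.as_slice(), &device)?.unsqueeze(0)?; // (1, 77)

    // The Module impl calls forward_with_mask(xs, usize::MAX), i.e. a plain causal mask.
    model.forward(&ids) // (1, 77, embed_dim) hidden states
}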
//! VGG-16 model implementation. //! //! See Very Deep Convolutional Networks for Large-Scale Image Recognition //! <https://arxiv.org/abs/1409.1556> use candle::{ModuleT, Result, Tensor}; use candle_nn::{FuncT, VarBuilder}; // Enum representing the different VGG models pub enum Models { Vgg13, Vgg16, Vgg19, } // Struct representing a VGG model #[derive(Debug)] pub struct Vgg<'a> { blocks: Vec<FuncT<'a>>, } // Struct representing the configuration for the pre-logit layer struct PreLogitConfig { in_dim: (usize, usize, usize, usize), target_in: usize, target_out: usize, } // Implementation of the VGG model impl<'a> Vgg<'a> { // Function to create a new VGG model pub fn new(vb: VarBuilder<'a>, model: Models) -> Result<Self> { let blocks = match model { Models::Vgg13 => vgg13_blocks(vb)?, Models::Vgg16 => vgg16_blocks(vb)?, Models::Vgg19 => vgg19_blocks(vb)?, }; Ok(Self { blocks }) } } // Implementation of the forward pass for the VGG model impl ModuleT for Vgg<'_> { fn forward_t(&self, xs: &Tensor, train: bool) -> Result<Tensor> { let mut xs = xs.unsqueeze(0)?; for block in self.blocks.iter() { xs = xs.apply_t(block, train)?; } Ok(xs) } } // Function to create a conv2d block // The block is composed of two conv2d layers followed by a max pool layer fn conv2d_block(convs: &[(usize, usize, &str)], vb: &VarBuilder) -> Result<FuncT<'static>> { let layers = convs .iter() .map(|&(in_c, out_c, name)| { candle_nn::conv2d( in_c, out_c, 3, candle_nn::Conv2dConfig { stride: 1, padding: 1, ..Default::default() }, vb.pp(name), ) }) .collect::<Result<Vec<_>>>()?; Ok(FuncT::new(move |xs, _train| { let mut xs = xs.clone(); for layer in layers.iter() { xs = xs.apply(layer)?.relu()? } xs = xs.max_pool2d_with_stride(2, 2)?; Ok(xs) })) } // Function to create a fully connected layer // The layer is composed of two linear layers followed by a dropout layer fn fully_connected( num_classes: usize, pre_logit_1: PreLogitConfig, pre_logit_2: PreLogitConfig, vb: VarBuilder, ) -> Result<FuncT> { let lin = get_weights_and_biases( &vb.pp("pre_logits.fc1"), pre_logit_1.in_dim, pre_logit_1.target_in, pre_logit_1.target_out, )?; let lin2 = get_weights_and_biases( &vb.pp("pre_logits.fc2"), pre_logit_2.in_dim, pre_logit_2.target_in, pre_logit_2.target_out, )?; let dropout1 = candle_nn::Dropout::new(0.5); let dropout2 = candle_nn::Dropout::new(0.5); let dropout3 = candle_nn::Dropout::new(0.5); Ok(FuncT::new(move |xs, train| { let xs = xs.reshape((1, pre_logit_1.target_out))?; let xs = xs.apply_t(&dropout1, train)?.apply(&lin)?.relu()?; let xs = xs.apply_t(&dropout2, train)?.apply(&lin2)?.relu()?; let lin3 = candle_nn::linear(4096, num_classes, vb.pp("head.fc"))?; let xs = xs.apply_t(&dropout3, train)?.apply(&lin3)?.relu()?; Ok(xs) })) } // Function to get the weights and biases for a layer // This is required because the weights and biases are stored in different format than our linear layer expects fn get_weights_and_biases( vs: &VarBuilder, in_dim: (usize, usize, usize, usize), target_in: usize, target_out: usize, ) -> Result<candle_nn::Linear> { let init_ws = candle_nn::init::DEFAULT_KAIMING_NORMAL; let ws = vs.get_with_hints(in_dim, "weight", init_ws)?; let ws = ws.reshape((target_in, target_out))?; let bound = 1. 
/ (target_out as f64).sqrt(); let init_bs = candle_nn::Init::Uniform { lo: -bound, up: bound, }; let bs = vs.get_with_hints(target_in, "bias", init_bs)?; Ok(candle_nn::Linear::new(ws, Some(bs))) } fn vgg13_blocks(vb: VarBuilder) -> Result<Vec<FuncT>> { let num_classes = 1000; let blocks = vec![ conv2d_block(&[(3, 64, "features.0"), (64, 64, "features.2")], &vb)?, conv2d_block(&[(64, 128, "features.5"), (128, 128, "features.7")], &vb)?, conv2d_block(&[(128, 256, "features.10"), (256, 256, "features.12")], &vb)?, conv2d_block(&[(256, 512, "features.15"), (512, 512, "features.17")], &vb)?, conv2d_block(&[(512, 512, "features.20"), (512, 512, "features.22")], &vb)?, fully_connected( num_classes, PreLogitConfig { in_dim: (4096, 512, 7, 7), target_in: 4096, target_out: 512 * 7 * 7, }, PreLogitConfig { in_dim: (4096, 4096, 1, 1), target_in: 4096, target_out: 4096, }, vb.clone(), )?, ]; Ok(blocks) } fn vgg16_blocks(vb: VarBuilder) -> Result<Vec<FuncT>> { let num_classes = 1000; let blocks = vec![ conv2d_block(&[(3, 64, "features.0"), (64, 64, "features.2")], &vb)?, conv2d_block(&[(64, 128, "features.5"), (128, 128, "features.7")], &vb)?, conv2d_block( &[ (128, 256, "features.10"), (256, 256, "features.12"), (256, 256, "features.14"), ], &vb, )?, conv2d_block( &[ (256, 512, "features.17"), (512, 512, "features.19"), (512, 512, "features.21"), ], &vb, )?, conv2d_block( &[ (512, 512, "features.24"), (512, 512, "features.26"), (512, 512, "features.28"), ], &vb, )?, fully_connected( num_classes, PreLogitConfig { in_dim: (4096, 512, 7, 7), target_in: 4096, target_out: 512 * 7 * 7, }, PreLogitConfig { in_dim: (4096, 4096, 1, 1), target_in: 4096, target_out: 4096, }, vb.clone(), )?, ]; Ok(blocks) } fn vgg19_blocks(vb: VarBuilder) -> Result<Vec<FuncT>> { let num_classes = 1000; let blocks = vec![ conv2d_block(&[(3, 64, "features.0"), (64, 64, "features.2")], &vb)?, conv2d_block(&[(64, 128, "features.5"), (128, 128, "features.7")], &vb)?, conv2d_block( &[ (128, 256, "features.10"), (256, 256, "features.12"), (256, 256, "features.14"), (256, 256, "features.16"), ], &vb, )?, conv2d_block( &[ (256, 512, "features.19"), (512, 512, "features.21"), (512, 512, "features.23"), (512, 512, "features.25"), ], &vb, )?, conv2d_block( &[ (512, 512, "features.28"), (512, 512, "features.30"), (512, 512, "features.32"), (512, 512, "features.34"), ], &vb, )?, fully_connected( num_classes, PreLogitConfig { in_dim: (4096, 512, 7, 7), target_in: 4096, target_out: 512 * 7 * 7, }, PreLogitConfig { in_dim: (4096, 4096, 1, 1), target_in: 4096, target_out: 4096, }, vb.clone(), )?, ]; Ok(blocks) }
candle/candle-transformers/src/models/vgg.rs/0
{ "file_path": "candle/candle-transformers/src/models/vgg.rs", "repo_id": "candle", "token_count": 4287 }
48
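The Vgg wrapper above builds its convolutional blocks and classifier head lazily from a `VarBuilder`, and its `forward_t` unsqueezes the input itself, so callers pass a single (3, 224, 224) image rather than a batch. A classification sketch is shown below; the weight file name is an assumption, and the usual ImageNet mean/std normalization of the input is left out.

use candle::{DType, Device, ModuleT, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::vgg::{Models, Vgg};

fn classify(image: &Tensor) -> candle::Result<u32> {
    let device = Device::Cpu;
    let vb = unsafe {
        VarBuilder::from_mmaped_safetensors(&["vgg16.safetensors"], DType::F32, &device)?
    };
    let model = Vgg::new(vb, Models::Vgg16)?;

    // `train = false` disables the dropout layers in the pre-logit head.
    let logits = model.forward_t(image, false)?; // (1, 1000)
    logits
        .squeeze(0)?
        .argmax(candle::D::Minus1)?
        .to_scalar::<u32>()
}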
pub mod text_generation;
candle/candle-transformers/src/pipelines/mod.rs/0
{ "file_path": "candle/candle-transformers/src/pipelines/mod.rs", "repo_id": "candle", "token_count": 7 }
49
use yew_agent::PublicWorker;

fn main() {
    console_error_panic_hook::set_once();
    candle_wasm_example_llama2::Worker::register();
}
candle/candle-wasm-examples/llama2-c/src/bin/worker.rs/0
{ "file_path": "candle/candle-wasm-examples/llama2-c/src/bin/worker.rs", "repo_id": "candle", "token_count": 54 }
50