Dataset schema:
- text: string (lengths 7 to 1.24M)
- id: string (lengths 14 to 166)
- metadata: dict
- __index_level_0__: int64 (values 0 to 519)
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Hiera model.""" import math import unittest from typing import Dict, List, Tuple from transformers import HieraConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import ( cached_property, is_torch_available, is_vision_available, ) from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import HieraBackbone, HieraForImageClassification, HieraForPreTraining, HieraModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class HieraModelTester: def __init__( self, parent, batch_size=13, image_size=[64, 64], mlp_ratio=1.0, num_channels=3, depths=[1, 1, 1, 1], patch_stride=[4, 4], patch_size=[7, 7], patch_padding=[3, 3], masked_unit_size=[8, 8], num_heads=[1, 1, 1, 1], embed_dim_multiplier=2.0, is_training=True, use_labels=True, embed_dim=8, hidden_act="gelu", decoder_hidden_size=2, decoder_depth=1, decoder_num_heads=1, initializer_range=0.02, scope=None, type_sequence_label_size=10, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.mlp_ratio = mlp_ratio self.num_channels = num_channels self.depths = depths self.patch_stride = patch_stride self.patch_size = patch_size self.patch_padding = patch_padding self.masked_unit_size = masked_unit_size self.num_heads = num_heads self.embed_dim_multiplier = embed_dim_multiplier self.is_training = is_training self.use_labels = use_labels self.embed_dim = embed_dim self.hidden_act = hidden_act self.decoder_hidden_size = decoder_hidden_size self.decoder_depth = decoder_depth self.decoder_num_heads = decoder_num_heads self.initializer_range = initializer_range self.scope = scope self.type_sequence_label_size = type_sequence_label_size def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return HieraConfig( embed_dim=self.embed_dim, image_size=self.image_size, patch_stride=self.patch_stride, patch_size=self.patch_size, patch_padding=self.patch_padding, masked_unit_size=self.masked_unit_size, mlp_ratio=self.mlp_ratio, num_channels=self.num_channels, depths=self.depths, num_heads=self.num_heads, embed_dim_multiplier=self.embed_dim_multiplier, hidden_act=self.hidden_act, decoder_hidden_size=self.decoder_hidden_size, decoder_depth=self.decoder_depth, decoder_num_heads=self.decoder_num_heads, initializer_range=self.initializer_range, ) def 
create_and_check_model(self, config, pixel_values, labels): model = HieraModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) tokens_spatial_shape = [i // s for i, s in zip(self.image_size, config.patch_stride)] expected_seq_len = math.prod(tokens_spatial_shape) // math.prod(config.query_stride) ** (config.num_query_pool) expected_dim = int(config.embed_dim * config.embed_dim_multiplier ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim)) def create_and_check_backbone(self, config, pixel_values, labels): model = HieraBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify hidden states self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) num_patches = config.image_size[0] // config.patch_stride[0] // config.masked_unit_size[0] self.parent.assertListEqual( list(result.feature_maps[0].shape), [self.batch_size, model.channels[0], num_patches, num_patches] ) # verify channels self.parent.assertEqual(len(model.channels), len(config.out_features)) # verify backbone works with out_features=None config.out_features = None model = HieraBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual( list(result.feature_maps[0].shape), [self.batch_size, model.channels[-1], num_patches, num_patches] ) # verify channels self.parent.assertEqual(len(model.channels), 1) def create_and_check_for_pretraining(self, config, pixel_values, labels): model = HieraForPreTraining(config=config) model.to(torch_device) model.eval() result = model(pixel_values) pred_stride = config.patch_stride[-1] * (config.query_stride[-1] ** config.num_query_pool) num_patches = self.image_size[0] // pred_stride self.parent.assertEqual( result.logits.shape, (self.batch_size, num_patches**2, self.num_channels * pred_stride**2) ) # test greyscale images config.num_channels = 1 model = HieraForPreTraining(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size[0], self.image_size[0]]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches**2, pred_stride**2)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = HieraForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images config.num_channels = 1 model = HieraForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size[0], self.image_size[0]]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, labels, ) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class HieraModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Hiera does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( ( HieraModel, HieraBackbone, HieraForImageClassification, HieraForPreTraining, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"image-feature-extraction": HieraModel, "image-classification": HieraForImageClassification} if is_torch_available() else {} ) fx_compatible = True test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = HieraModelTester(self) self.config_tester = ConfigTester(self, config_class=HieraConfig, has_text_modality=False) def test_config(self): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() # Overriding as Hiera `get_input_embeddings` returns HieraPatchEmbeddings def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) # Overriding as attention shape depends on patch_stride and mask_unit_size def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = len(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True seq_len = math.prod([i // s for i, s in zip(config.image_size, config.patch_stride)]) mask_unit_area = math.prod(config.masked_unit_size) num_windows = seq_len // mask_unit_area if model_class.__name__ == "HieraForPreTraining": num_windows = int(num_windows * (1 - config.mask_ratio)) seq_len = int(num_windows * mask_unit_area) model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) self.assertListEqual( list(attentions[0].shape[-4:]), [self.model_tester.num_heads[0], num_windows, mask_unit_area, seq_len // num_windows], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) # also another +1 for reshaped_hidden_states added_hidden_states = 1 if model_class.__name__ == "HieraBackbone" else 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) self.assertListEqual( list(self_attentions[0].shape[-4:]), [self.model_tester.num_heads[0], num_windows, 
mask_unit_area, seq_len // num_windows], ) # Overriding as attention shape depends on patch_stride and mask_unit_size def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class, image_size): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # Hiera has a different seq_length patch_size = config.patch_stride num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) if model_class.__name__ == "HieraForPreTraining": mask_unit_area = math.prod(config.masked_unit_size) num_windows = num_patches // mask_unit_area num_windows = int(num_windows * (1 - config.mask_ratio)) num_patches = int(num_windows * mask_unit_area) self.assertListEqual( list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], ) if not model_class.__name__ == "HieraBackbone": reshaped_hidden_states = outputs.reshaped_hidden_states self.assertEqual(len(reshaped_hidden_states), expected_num_layers) batch_size = reshaped_hidden_states[0].shape[0] num_channels = reshaped_hidden_states[0].shape[-1] reshaped_hidden_states = reshaped_hidden_states[0].view(batch_size, -1, num_channels) self.assertListEqual( list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() image_size = self.model_tester.image_size for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class, image_size) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class, image_size) # Overriding since HieraForPreTraining outputs bool_masked_pos which has to be converted to float in the msg def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object.float() - dict_object.float()))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." 
), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() additional_kwargs = {} tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) additional_kwargs["output_hidden_states"] = True check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs) if self.has_attentions: # Removing "output_hidden_states" del additional_kwargs["output_hidden_states"] tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) additional_kwargs["output_attentions"] = True check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) additional_kwargs["output_hidden_states"] = True check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs) @unittest.skip(reason="Hiera Transformer does not use feedforward chunking") def test_feed_forward_chunking(self): pass @unittest.skip(reason="Hiera does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in ["facebook/hiera-tiny-224-hf"]: model = HieraModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision 
class HieraModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained("facebook/hiera-tiny-224-in1k-hf") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = HieraForImageClassification.from_pretrained("facebook/hiera-tiny-224-in1k-hf").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) expected_pixel_values = torch.tensor( [ [[0.2967, 0.4679, 0.4508], [0.3309, 0.4337, 0.3309], [0.3309, 0.3823, 0.3309]], [[-1.5455, -1.4930, -1.5455], [-1.5280, -1.4755, -1.5980], [-1.5630, -1.5280, -1.4755]], [[-0.6367, -0.4973, -0.5321], [-0.7936, -0.6715, -0.6715], [-0.8284, -0.7413, -0.5670]], ] ).to(torch_device) self.assertTrue(torch.allclose(inputs.pixel_values[0, :3, :3, :3], expected_pixel_values, atol=1e-4)) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([[0.8028, 0.2409, -0.2254, -0.3712, -0.2848]]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :5], expected_slice, atol=1e-4)) def test_inference_interpolate_pos_encoding(self): model = HieraModel.from_pretrained("facebook/hiera-tiny-224-hf").to(torch_device) image_processor = AutoImageProcessor.from_pretrained( "facebook/hiera-tiny-224-hf", size={"shortest_edge": 448}, crop_size={"height": 448, "width": 448} ) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) # forward pass with torch.no_grad(): outputs = model(pixel_values, interpolate_pos_encoding=True) # verify the logits expected_shape = torch.Size((1, 196, 768)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[1.8522, 0.1532, 0.3849], [2.7352, -0.1941, 0.1848], [1.5859, -0.0773, 0.0168]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) @slow def test_inference_for_pretraining(self): # make random mask reproducible torch.manual_seed(2) model = HieraForPreTraining.from_pretrained("facebook/hiera-tiny-224-mae-hf").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) config = model.config mask_spatial_shape = [ i // s // ms for i, s, ms in zip(config.image_size, config.patch_stride, config.masked_unit_size) ] num_windows = math.prod(mask_spatial_shape) noise = torch.rand(1, num_windows).to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs, noise=noise) # verify the logits expected_shape = torch.Size((1, 196, 768)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [ [1.6407, 1.6506, 1.6541, 1.6617, 1.6703], [1.9730, 1.9842, 1.9848, 1.9896, 1.9947], [1.5949, 1.8262, 1.2602, 1.4801, 1.4448], [1.2341, 1.7907, 0.8618, 1.5202, 1.4523], [2.0140, 1.9846, 1.9434, 1.9019, 1.8648], ] ) self.assertTrue(torch.allclose(outputs.logits[0, :5, :5], expected_slice.to(torch_device), atol=1e-4)) @require_torch class HieraBackboneTest(unittest.TestCase, BackboneTesterMixin): all_model_classes = (HieraBackbone,) if is_torch_available() else () config_class = HieraConfig def setUp(self): self.model_tester = HieraModelTester(self)
transformers/tests/models/hiera/test_modeling_hiera.py/0
{ "file_path": "transformers/tests/models/hiera/test_modeling_hiera.py", "repo_id": "transformers", "token_count": 12031 }
437
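The Hiera integration test above exercises the facebook/hiera-tiny-224-in1k-hf checkpoint end to end. A minimal standalone sketch of the same classification path, assuming a transformers version that ships the Hiera model and reusing the fixture image path from the test:

import torch
from PIL import Image

from transformers import AutoImageProcessor, HieraForImageClassification

# Checkpoint name and image path are taken from the integration test above.
image_processor = AutoImageProcessor.from_pretrained("facebook/hiera-tiny-224-in1k-hf")
model = HieraForImageClassification.from_pretrained("facebook/hiera-tiny-224-in1k-hf")
model.eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000), as asserted in the test

print(model.config.id2label[logits.argmax(-1).item()])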
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import AutoImageProcessor from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class ImageGPTImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, ): super().__init__() size = size if size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize def prepare_image_processor_dict(self): return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8866443634033203, 0.6618829369544983, 0.3891746401786804], [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } def expected_output_image_shape(self, images): return (self.size["height"] * self.size["width"],) def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class ImageGPTImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = ImageGPTImageProcessor if is_vision_available() else None def setUp(self): super().setUp() self.image_processor_tester = ImageGPTImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "clusters")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_normalize")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) 
self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_image_processor_to_json_string(self): image_processor = self.image_processing_class(**self.image_processor_dict) obj = json.loads(image_processor.to_json_string()) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(value, obj[key])) else: self.assertEqual(obj[key], value) def test_image_processor_to_json_file(self): image_processor_first = self.image_processing_class(**self.image_processor_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "image_processor.json") image_processor_first.to_json_file(json_file_path) image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict() image_processor_first = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(value, image_processor_second[key])) else: self.assertEqual(image_processor_first[key], value) def test_image_processor_from_and_save_pretrained(self): for image_processing_class in self.image_processor_list: image_processor_first = self.image_processing_class(**self.image_processor_dict) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(tmpdirname) image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict() image_processor_first = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(value, image_processor_second[key])) else: self.assertEqual(image_processor_first[key], value) def test_image_processor_save_load_with_autoimageprocessor(self): for image_processing_class in self.image_processor_list: image_processor_first = image_processing_class(**self.image_processor_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = image_processor_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) image_processor_second = AutoImageProcessor.from_pretrained(tmpdirname) image_processor_first = image_processor_first.to_dict() image_processor_second = image_processor_second.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(value, image_processor_second[key])) else: self.assertEqual(image_processor_first[key], value) @unittest.skip(reason="ImageGPT requires clusters at initialization") def test_init_without_params(self): pass # Override the test from ImageProcessingTestMixin as ImageGPT model takes input_ids as input def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").input_ids expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(encoded_images) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").input_ids self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape) ) # Override the test from ImageProcessingTestMixin as ImageGPT model takes input_ids as input def 
test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").input_ids expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(encoded_images) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").input_ids self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape) ) @unittest.skip(reason="ImageGPT assumes clusters for 3 channels") def test_call_numpy_4_channels(self): pass # Override the test from ImageProcessingTestMixin as ImageGPT model takes input_ids as input def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").input_ids self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").input_ids self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape), ) def prepare_images(): # we use revision="refs/pr/1" until the PR is merged # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1 dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1") image1 = dataset[4]["image"] image2 = dataset[5]["image"] images = [image1, image2] return images @require_vision @require_torch class ImageGPTImageProcessorIntegrationTest(unittest.TestCase): @slow def test_image(self): image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small") images = prepare_images() # test non-batched encoding = image_processing(images[0], return_tensors="pt") self.assertIsInstance(encoding.input_ids, torch.LongTensor) self.assertEqual(encoding.input_ids.shape, (1, 1024)) expected_slice = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice) # test batched encoding = image_processing(images, return_tensors="pt") self.assertIsInstance(encoding.input_ids, torch.LongTensor) self.assertEqual(encoding.input_ids.shape, (2, 1024)) expected_slice = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
transformers/tests/models/imagegpt/test_image_processing_imagegpt.py/0
{ "file_path": "transformers/tests/models/imagegpt/test_image_processing_imagegpt.py", "repo_id": "transformers", "token_count": 4831 }
438
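As the ImageGPT integration test above shows, ImageGPTImageProcessor quantizes each pixel to its nearest colour cluster and returns token ids (input_ids) instead of pixel values. A hedged sketch of that flow, assuming the openai/imagegpt-small checkpoint used in the test and a placeholder image path:

from PIL import Image

from transformers import ImageGPTImageProcessor

image_processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

# Any RGB image works; the path below is a placeholder.
image = Image.open("my_image.png").convert("RGB")

encoding = image_processor(image, return_tensors="pt")
# The processor returns cluster indices rather than pixel_values:
# a LongTensor of shape (1, 1024) for the 32x32 resized image, matching the test.
print(encoding.input_ids.shape)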
# coding=utf-8 # Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import tempfile import unittest from tempfile import TemporaryDirectory import numpy as np import pytest import requests from transformers.testing_utils import ( get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, require_vision, ) from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, CLIPImageProcessor, Kosmos2Processor, PreTrainedTokenizerFast, XLMRobertaTokenizer, XLMRobertaTokenizerFast, ) SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers @require_vision class Kosmos2ProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() image_processor = CLIPImageProcessor() # We have a SentencePiece fixture for testing slow_tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB) fast_tokenizer = XLMRobertaTokenizerFast(__slow_tokenizer=slow_tokenizer) processor = Kosmos2Processor(image_processor, fast_tokenizer) processor.save_pretrained(self.tmpdirname) def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. """ image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def test_image_procesor_load_save_reload(self): # make sure load from Hub repo. 
-> save -> reload locally work image_processor = CLIPImageProcessor.from_pretrained("microsoft/kosmos-2-patch14-224") with TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(tmp_dir) reloaded_image_processor = CLIPImageProcessor.from_pretrained(tmp_dir) assert image_processor.to_dict() == reloaded_image_processor.to_dict() assert image_processor.to_json_string() == reloaded_image_processor.to_json_string() def test_save_load_pretrained_additional_features(self): processor = Kosmos2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = Kosmos2Processor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, CLIPImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_image_processor = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_image_processor.keys(): self.assertAlmostEqual(input_image_processor[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) input_str = "This is a test" encoded_processor = processor(text=input_str, add_eos_token=True) encoded_tok = tokenizer(input_str, return_token_type_ids=False) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) input_str = "This is a test" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual( list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask", "image_embeds_position_mask"] ) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) input_str = "This is a test" image_input = self.prepare_image_inputs() # both image and text inputs = processor(text=input_str, images=image_input) self.assertListEqual( 
list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask", "image_embeds_position_mask"] ) # only text inputs = processor(text=input_str) self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"]) # only image inputs = processor(images=image_input) self.assertListEqual(list(inputs.keys()), ["pixel_values"]) @require_torch def test_full_processor(self): url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/two_dogs.jpg" processor = Kosmos2Processor.from_pretrained("microsoft/kosmos-2-patch14-224") # test with different input formats. # fmt: off texts = [ # no phrase "<grounding> Two puppies sit in a field of grass.", # 1 phrase "<grounding> <phrase> Two puppies </phrase> sit in a field of grass.", # 2 phrases "<grounding> <phrase> Two puppies </phrase> sit in a field of <phrase> grass </phrase>.", # 2 phrases: bboxes already specified for the 1st phrase "<grounding> <phrase> Two puppies </phrase> <object> <patch_index_0079> <patch_index_1016> </delimiter_of_multi_objects/> <patch_index_0135> <patch_index_1008> </object> sit in a field of <phrase> grass </phrase>.", ] # fmt: on image = Image.open(requests.get(url, stream=True).raw) # To match the official (microsoft) Kosmos-2 demo from which the expected values here are grabbed image_path = os.path.join(self.tmpdirname, "image.jpg") image.save(image_path) image = Image.open(image_path) # fmt: off bboxes = [ [None, []], [[None], [[]], [(79, 1016)], [[(79, 1016)]], [[(79, 1016), (135, 1008)]]], [[[(79, 1016), (135, 1008)], None], [[(79, 1016), (135, 1008)], []], [[(79, 1016), (135, 1008)], (480, 1023)], [[(79, 1016), (135, 1008)], [(480, 1023)]]], [[None, [(480, 1023)]]], ] # fmt: on batch_image = [image] * 4 batch_text = [texts[0], texts[1], texts[1], texts[2]] batch_bboxes = [ None, # no phrase [[]], # 1 phrase: no bbox [(79, 1016)], # 1 phrase: 1 bbox [[(79, 1016), (135, 1008)], (480, 1023)], # 2 phrase: 2 bboxes + 1 bbox ] # fmt: off expected_input_ids = [ [0, 64012, 1264, 17772, 1357, 12, 10, 770, 9, 4464, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 1357, 12, 10, 770, 9, 4464, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64010, 1357, 12, 10, 770, 9, 4464, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64011, 64148, 65021, 64010, 1357, 12, 10, 770, 9, 4464, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64011, 64148, 65021, 64010, 1357, 12, 10, 770, 9, 64007, 4464, 64008, 106, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64011, 64148, 65021, 64010, 1357, 12, 10, 770, 9, 64007, 4464, 64008, 64009, 64493, 65036, 64010, 106, 4, 2], ] # fmt: on EXPECTED_PIXEL_VALUES_1 = np.array( [ [ [-0.6535852551460266, -0.6389868259429932, -0.6243883967399597], [-0.6535852551460266, -0.6389868259429932, -0.6243883967399597], [-0.6243883967399597, -0.6243883967399597, -0.5951915383338928], ], [ [-0.20629698038101196, -0.19128920137882233, -0.19128920137882233], [-0.20629698038101196, -0.19128920137882233, -0.17628143727779388], [-0.2213047444820404, -0.20629698038101196, -0.16127367317676544], ], [ [-0.5843556523323059, -0.5701355338096619, -0.5701355338096619], [-0.5843556523323059, -0.5701355338096619, -0.5559154152870178], [-0.5843556523323059, -0.5559154152870178, -0.5416953563690186], ], ] ) EXPECTED_PIXEL_VALUES_2 = np.array( [ [ [-0.4346088469028473, -0.47840413451194763, -0.7849710583686829], [-0.5221993923187256, -0.5076009631156921, -0.755774199962616], [-0.5221993923187256, -0.5076009631156921, -0.7411757707595825], ], [ 
[-0.2813358008861542, -0.2963435649871826, -0.431413471698761], [-0.26632803678512573, -0.2963435649871826, -0.4764367938041687], [-0.2213047444820404, -0.2813358008861542, -0.49144455790519714], ], [ [-0.5701355338096619, -0.641235888004303, -0.7549964189529419], [-0.5843556523323059, -0.641235888004303, -0.7834365367889404], [-0.5559154152870178, -0.641235888004303, -0.7834365367889404], ], ] ) def check(texts, bboxes, expected_input_ids): outputs = processor(images=None, text=texts, bboxes=bboxes, add_eos_token=True) self.assertListEqual(outputs.input_ids, expected_input_ids) # no phrase check(texts[0], bboxes[0][0], expected_input_ids[0]) # no phrase check(texts[0], bboxes[0][1], expected_input_ids[0]) # 1 phrase: no bbox check(texts[1], bboxes[1][0], expected_input_ids[1]) # 1 phrase: no bbox check(texts[1], bboxes[1][1], expected_input_ids[1]) # 1 phrase: 1 bbox check(texts[1], bboxes[1][2], expected_input_ids[2]) # 1 phrase: 1 bbox check(texts[1], bboxes[1][3], expected_input_ids[2]) # 1 phrase: 2 bboxes check(texts[1], bboxes[1][4], expected_input_ids[3]) # could not contain `[None]` with pytest.raises(ValueError): _ = processor.preprocess_examples(images=None, texts=texts[1], bboxes=[[None]]) # 2 phrase: 2 bboxes + no bbox check(texts[2], bboxes[2][0], expected_input_ids[4]) # 2 phrase: 2 bboxes + no bbox check(texts[2], bboxes[2][1], expected_input_ids[4]) # 2 phrase: 2 bboxes + 1 bbox check(texts[2], bboxes[2][2], expected_input_ids[5]) # 2 phrase: 2 bboxes + 1 bbox check(texts[2], bboxes[2][3], expected_input_ids[5]) # 2 phrase: no box (as already specified in the text) + 1 bbox check(texts[3], bboxes[3][0], expected_input_ids[5]) # could not contain `[None]` with pytest.raises(ValueError): _ = processor.preprocess_examples(images=None, texts=texts[2], bboxes=[[(79, 1016), (135, 1008)], [None]]) # test batch outputs = processor( images=None, text=batch_text, bboxes=batch_bboxes, add_eos_token=True, ) self.assertListEqual( outputs.input_ids, [expected_input_ids[0], expected_input_ids[1], expected_input_ids[2], expected_input_ids[5]], ) # test batch with padding (without `return_tensors`) outputs = processor( images=None, text=batch_text, bboxes=batch_bboxes, padding=True, add_eos_token=True, ) # padding on the right self.assertListEqual( outputs.input_ids[0], expected_input_ids[0] + [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), ) self.assertListEqual( outputs.attention_mask[0], [1] * len(expected_input_ids[0]) + [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), ) # no padding for the longest sequence self.assertListEqual(outputs.input_ids[-1], expected_input_ids[5]) self.assertListEqual(outputs.attention_mask[-1], [1] * len(expected_input_ids[5])) # test batch with padding (with `return_tensors`) outputs = processor( images=None, text=batch_text, bboxes=batch_bboxes, return_tensors="pt", padding=True, add_eos_token=True, ) # padding on the right self.assertListEqual( outputs.input_ids.numpy().tolist()[0], expected_input_ids[0] + [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), ) self.assertListEqual( outputs.attention_mask.numpy().tolist()[0], [1] * len(expected_input_ids[0]) + [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), ) # no padding for the longest sequence self.assertListEqual(outputs.input_ids.numpy().tolist()[-1], expected_input_ids[5]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[-1], [1] * len(expected_input_ids[5])) # test with image num_image_tokens = 64 outputs = 
processor(images=image, text=texts[0], bboxes=None, add_eos_token=True) self.assertTupleEqual(outputs.pixel_values[0].shape, (3, 224, 224)) self.assertListEqual( outputs.input_ids, [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[0][1:], ) self.assertListEqual( outputs.image_embeds_position_mask, [0] * 2 + [1] * num_image_tokens + [0] + [0] * (len(expected_input_ids[0]) - 1), ) np.testing.assert_allclose(outputs.pixel_values[0][:3, :3, :3], EXPECTED_PIXEL_VALUES_1, atol=1e-9) np.testing.assert_allclose(outputs.pixel_values[0][:3, -3:, -3:], EXPECTED_PIXEL_VALUES_2, atol=1e-9) # test with image in batch (right padding) outputs = processor( images=batch_image, text=batch_text, bboxes=batch_bboxes, return_tensors="pt", padding=True, add_eos_token=True, ) self.assertTupleEqual(outputs.pixel_values.shape, (4, 3, 224, 224)) np.testing.assert_allclose( outputs.pixel_values[:, :3, :3, :3].numpy(), [EXPECTED_PIXEL_VALUES_1] * len(batch_image), atol=1e-9 ) np.testing.assert_allclose( outputs.pixel_values[:, :3, -3:, -3:].numpy(), [EXPECTED_PIXEL_VALUES_2] * len(batch_image), atol=1e-9 ) # padding on the right: the `[1:]` below is because the part for `BOS` is already added in the beginning of each (dynamically computed) expected value # noqa # fmt: off EXPECTED_IDS_BATCH_RIGHT_PADDING = [ [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[0][1:] + [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[5][1:], ] EXPECTED_MASK_BATCH_RIGHT_PADDING = [ [1, 1] + [1] * num_image_tokens + [1] + [1] * len(expected_input_ids[0][1:]) + [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), [1] * (2 + num_image_tokens + len(expected_input_ids[5])), ] # fmt: on self.assertListEqual(outputs.input_ids.numpy().tolist()[0], EXPECTED_IDS_BATCH_RIGHT_PADDING[0]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[0], EXPECTED_MASK_BATCH_RIGHT_PADDING[0]) self.assertListEqual(outputs.input_ids.numpy().tolist()[-1], EXPECTED_IDS_BATCH_RIGHT_PADDING[-1]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[-1], EXPECTED_MASK_BATCH_RIGHT_PADDING[-1]) self.assertListEqual( outputs.image_embeds_position_mask.numpy().tolist(), [[0, 0] + [1] * num_image_tokens + [0] + [0] * (len(expected_input_ids[5]) - 1)] * len(batch_image), ) processor = Kosmos2Processor.from_pretrained("microsoft/kosmos-2-patch14-224", padding_side="left") # test with image in batch (left padding) outputs = processor( images=batch_image, text=batch_text, bboxes=batch_bboxes, return_tensors="pt", padding=True, add_eos_token=True, ) # padding on the left: the `[1:]` below is because the part for `BOS` is already added in the beginning of each (dynamically computed) expected value # noqa # fmt: off EXPECTED_IDS_BATCH = [ [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])) + [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[0][1:], [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[5][1:], ] EXPECTED_MASK_BATCH =[ [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])) + [1, 1] + [1] * num_image_tokens + [1] + [1] * len(expected_input_ids[0][1:]), [1] * (2 + num_image_tokens + len(expected_input_ids[5])), ] EXPECTED_IMG_POS_MASK_BATCH = [ [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])) + [0, 0] + [1] * num_image_tokens + [0] + [0] * len(expected_input_ids[0][1:]), [0, 0] + [1] * 
num_image_tokens + [0] + [0] * (len(expected_input_ids[5]) - 1), ] # fmt: on self.assertListEqual(outputs.input_ids.numpy().tolist()[0], EXPECTED_IDS_BATCH[0]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[0], EXPECTED_MASK_BATCH[0]) self.assertListEqual(outputs.image_embeds_position_mask.numpy().tolist()[0], EXPECTED_IMG_POS_MASK_BATCH[0]) # no padding for the longest sequence self.assertListEqual(outputs.input_ids.numpy().tolist()[-1], EXPECTED_IDS_BATCH[-1]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[-1], EXPECTED_MASK_BATCH[-1]) self.assertListEqual(outputs.image_embeds_position_mask.numpy().tolist()[-1], EXPECTED_IMG_POS_MASK_BATCH[-1])
transformers/tests/models/kosmos2/test_processor_kosmos2.py/0
{ "file_path": "transformers/tests/models/kosmos2/test_processor_kosmos2.py", "repo_id": "transformers", "token_count": 9851 }
439
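The full-processor test above feeds grounding text and bounding boxes through Kosmos2Processor. A minimal sketch of a single grounded example, assuming the microsoft/kosmos-2-patch14-224 checkpoint; the phrase and patch-index bbox pair are copied from the test, and exact output shapes may differ slightly from the comments:

import requests
from PIL import Image

from transformers import Kosmos2Processor

processor = Kosmos2Processor.from_pretrained("microsoft/kosmos-2-patch14-224")

url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/two_dogs.jpg"
image = Image.open(requests.get(url, stream=True).raw)

text = "<grounding> <phrase> Two puppies </phrase> sit in a field of grass."
# One phrase with one bounding box, given as a (start_patch_index, end_patch_index)
# pair exactly as in the test above.
inputs = processor(images=image, text=text, bboxes=[(79, 1016)], return_tensors="pt")

print(inputs.input_ids.shape)    # text tokens plus 64 image placeholder tokens
print(inputs.pixel_values.shape) # expected (1, 3, 224, 224)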
# coding=utf-8 # Copyright 2022 Google LongT5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import tempfile import unittest from transformers import LongT5Config, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_QUESTION_ANSWERING_MAPPING, AutoTokenizer, LongT5EncoderModel, LongT5ForConditionalGeneration, LongT5Model, ) class LongT5ModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, local_radius=5, encoder_attention_type="local", global_block_size=3, # For common tests is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, large_model_config_path="google/long-t5-local-large", ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length self.local_radius = local_radius self.block_len = local_radius + 1 self.encoder_attention_type = encoder_attention_type self.global_block_size = global_block_size # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers self.large_model_config_path = large_model_config_path def get_large_model_config(self): return LongT5Config.from_pretrained(self.large_model_config_path) def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: 
lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = self.get_config() return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def get_pipeline_config(self): return LongT5Config( vocab_size=166, # longt5 forces 100 extra tokens d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, local_radius=self.local_radius, encoder_attention_type=self.encoder_attention_type, global_block_size=self.global_block_size, ) def get_config(self): return LongT5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, local_radius=self.local_radius, encoder_attention_type=self.encoder_attention_type, global_block_size=self.global_block_size, ) def check_prepare_lm_labels_via_shift_left( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5Model(config=config) model.to(torch_device) model.eval() # make sure that lm_labels are correctly padded from the right lm_labels.masked_fill_((lm_labels == self.decoder_start_token_id), self.eos_token_id) # add casaul pad token mask triangular_mask = torch.tril(lm_labels.new_ones(lm_labels.shape)).logical_not() lm_labels.masked_fill_(triangular_mask, self.pad_token_id) decoder_input_ids = model._shift_right(lm_labels) for i, (decoder_input_ids_slice, lm_labels_slice) in enumerate(zip(decoder_input_ids, lm_labels)): # first item self.parent.assertEqual(decoder_input_ids_slice[0].item(), self.decoder_start_token_id) if i < decoder_input_ids_slice.shape[-1]: if i < decoder_input_ids.shape[-1] - 1: # items before diagonal self.parent.assertListEqual( decoder_input_ids_slice[1 : i + 1].tolist(), lm_labels_slice[:i].tolist() ) # pad items after diagonal if i < decoder_input_ids.shape[-1] - 2: self.parent.assertListEqual( decoder_input_ids_slice[i + 2 :].tolist(), lm_labels_slice[i + 1 : -1].tolist() ) else: # all items after square self.parent.assertListEqual(decoder_input_ids_slice[1:].tolist(), lm_labels_slice[:-1].tolist()) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5Model(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, 
self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size)) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(decoder_past), config.num_layers) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0]), 4) def create_and_check_with_lm_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5ForConditionalGeneration(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertEqual(len(outputs), 4) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_decoder_model_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5Model(config=config).get_decoder().to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extend to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5Model(config=config).get_decoder() model.to(torch_device) model.eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass output, past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True).to_tuple() # create hypothetical next token and extend to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = 
model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5Model(config=config).get_decoder().to(torch_device).eval() # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next tokens and extend to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_attention_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_generate_with_past_key_values( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5ForConditionalGeneration(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_encoder_decoder_shared_weights( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): for model_class in [LongT5Model, LongT5ForConditionalGeneration]: torch.manual_seed(0) model = model_class(config=config).to(torch_device).eval() # load state dict copies weights but does not tie them model.encoder.load_state_dict(model.decoder.state_dict(), strict=False) torch.manual_seed(0) tied_config = copy.deepcopy(config) tied_config.tie_encoder_decoder = True tied_model = model_class(config=tied_config).to(torch_device).eval() model_result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that the tied model has fewer parameters 
self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4 ) ) # check that outputs after saving and loading are equal with tempfile.TemporaryDirectory() as tmpdirname: tied_model.save_pretrained(tmpdirname) tied_model = model_class.from_pretrained(tmpdirname) tied_model.to(torch_device) tied_model.eval() # check that the tied model has fewer parameters self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4, ) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "use_cache": False, } return config, inputs_dict @require_torch class LongT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (LongT5Model, LongT5ForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (LongT5ForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": LongT5Model, "summarization": LongT5ForConditionalGeneration, "text2text-generation": LongT5ForConditionalGeneration, "translation": LongT5ForConditionalGeneration, } if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_torchscript = True test_resize_embeddings = True test_model_parallel = False is_encoder_decoder = True def setUp(self): self.model_tester = LongT5ModelTester(self) self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_shift_right(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_with_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_lm_head(*config_and_inputs) def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_past_with_attn_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_decoder_model_past_with_3d_attn_mask(self): ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = 
self.model_tester.prepare_config_and_inputs() attention_mask = ids_tensor( [self.model_tester.batch_size, self.model_tester.encoder_seq_length, self.model_tester.encoder_seq_length], vocab_size=2, ) decoder_attention_mask = ids_tensor( [self.model_tester.batch_size, self.model_tester.decoder_seq_length, self.model_tester.decoder_seq_length], vocab_size=2, ) self.model_tester.create_and_check_decoder_model_attention_mask_past( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_generate_with_past_key_values(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_generate_with_past_key_values(*config_and_inputs) def test_encoder_decoder_shared_weights(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_encoder_decoder_shared_weights(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "google/long-t5-local-base" model = LongT5Model.from_pretrained(model_name) self.assertIsNotNone(model) @slow def test_export_to_onnx(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() model = LongT5Model(config_and_inputs[0]).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f"{tmpdirname}/longt5_test.onnx", export_params=True, opset_version=13, input_names=["input_ids", "decoder_input_ids"], ) def test_generate_with_head_masking(self): attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"] config_and_inputs = self.model_tester.prepare_config_and_inputs() config = config_and_inputs[0] max_length = config_and_inputs[1].shape[-1] + 3 model = LongT5ForConditionalGeneration(config).eval() model.to(torch_device) head_masking = { "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device), "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device), "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device), } for attn_name, (name, mask) in zip(attention_names, head_masking.items()): head_masks = {name: mask} # Explicitly pass decoder_head_mask as it is required by the LongT5 model when head_mask is specified if name == "head_mask": head_masks["decoder_head_mask"] = torch.ones( config.num_decoder_layers, config.num_heads, device=torch_device ) out = model.generate( config_and_inputs[1], num_beams=1, max_length=max_length, output_attentions=True, return_dict_in_generate=True, **head_masks, ) # We check the state of decoder_attentions and cross_attentions just from the last step attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="has_attentions is set to False") else: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) 
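# Editor's note (added for clarity, not part of the original suite): `block_len` fetched just below
# equals local_radius + 1 (e.g. local_radius=5 elsewhere in this file), and LongT5's "local" attention
# lets each block of block_len query positions attend to its own block plus the two neighbouring
# blocks. That is why the shape assertions in this test expect attention weights ending in
# [num_attention_heads, block_len, 3 * block_len] (with an extra global_seq_len key term in the
# transient-global variant). Sketch of the arithmetic, assuming local_radius=5:
#   block_len = 5 + 1   # 6 query positions per block
#   key_len = 3 * 6     # 18 key positions: previous block + current block + next block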
decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) block_len = getattr(self.model_tester, "block_len", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Question Answering model returns start_logits and end_logits if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 # start_logits and end_logits instead of only 1 output if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len], ) def 
_check_encoder_attention_for_generate(self, attentions, batch_size, config, seq_length): block_len = getattr(self.model_tester, "block_len", None) encoder_expected_shape = (batch_size, 2, config.num_attention_heads, block_len, 3 * block_len) self.assertIsInstance(attentions, tuple) self.assertListEqual( [layer_attentions.shape for layer_attentions in attentions], [encoder_expected_shape] * len(attentions), ) @unittest.skip( reason="This architecture has tied weights by default and there is no way to remove it, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245" ) def test_load_save_without_tied_weights(self): pass @require_torch class LongT5TGlobalModelTest(LongT5ModelTest): def setUp(self): self.model_tester = LongT5ModelTester( self, encoder_attention_type="transient-global", large_model_config_path="google/long-t5-tglobal-large" ) self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="has_attentions is set to False") else: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) block_len = getattr(self.model_tester, "block_len", None) global_block_size = getattr(self.model_tester, "global_block_size", None) global_seq_len = encoder_seq_length // global_block_size if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len + global_seq_len], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Question Answering model returns start_logits and end_logits if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 # start_logits and end_logits instead of only 1 output if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder 
attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len + global_seq_len], ) def _check_encoder_attention_for_generate(self, attentions, batch_size, config, seq_length): block_len = getattr(self.model_tester, "block_len", None) global_block_size = getattr(self.model_tester, "global_block_size", None) global_seq_length = seq_length // global_block_size encoder_expected_shape = ( batch_size, 2, config.num_attention_heads, block_len, 3 * block_len + global_seq_length, ) self.assertIsInstance(attentions, tuple) self.assertListEqual( [layer_attentions.shape for layer_attentions in attentions], [encoder_expected_shape] * len(attentions), ) class LongT5EncoderOnlyModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, local_radius=5, encoder_attention_type="local", global_block_size=3, # For common tests use_attention_mask=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, is_training=False, dropout_rate=0.1, initializer_factor=0.002, is_encoder_decoder=False, eos_token_id=1, pad_token_id=0, scope=None, large_model_config_path="google/long-t5-local-large", ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.local_radius = local_radius self.block_len = local_radius + 1 self.encoder_attention_type = encoder_attention_type self.global_block_size = global_block_size # For common tests self.seq_length = self.encoder_seq_length self.use_attention_mask = use_attention_mask self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.is_encoder_decoder = is_encoder_decoder self.scope = None self.is_training = is_training self.large_model_config_path = large_model_config_path def 
get_large_model_config(self): return LongT5Config.from_pretrained(self.large_model_config_path) def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) config = LongT5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, is_encoder_decoder=self.is_encoder_decoder, local_radius=self.local_radius, encoder_attention_type=self.encoder_attention_type, global_block_size=self.global_block_size, ) return ( config, input_ids, attention_mask, ) def create_and_check_model( self, config, input_ids, attention_mask, ): model = LongT5EncoderModel(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, attention_mask=attention_mask, ) result = model(input_ids=input_ids) encoder_output = result.last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict class LongT5EncoderOnlyModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (LongT5EncoderModel,) if is_torch_available() else () test_pruning = False test_torchscript = True test_resize_embeddings = False test_model_parallel = False def setUp(self): self.model_tester = LongT5EncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="has_attentions is set to False") else: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True block_len = getattr(self.model_tester, "block_len", 4) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( 
list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len], ) @unittest.skip( reason="This architecture has tied weights by default and there is no way to remove it, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245" ) def test_load_save_without_tied_weights(self): pass class LongT5EncoderOnlyTGlobalModelTest(LongT5EncoderOnlyModelTest): def setUp(self): self.model_tester = LongT5EncoderOnlyModelTester( self, encoder_attention_type="transient-global", large_model_config_path="google/long-t5-tglobal-large" ) self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="has_attentions is set to False") else: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True block_len = getattr(self.model_tester, "block_len", None) seq_len = getattr(self.model_tester, "seq_length", None) global_block_size = getattr(self.model_tester, "global_block_size", 4) global_seq_len = seq_len // global_block_size for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len + global_seq_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 
else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len + global_seq_len], ) def use_task_specific_params(model, task): model.config.update(model.config.task_specific_params[task]) @require_torch @require_sentencepiece @require_tokenizers class LongT5ModelIntegrationTests(unittest.TestCase): @cached_property def model(self): return LongT5ForConditionalGeneration.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps").to( torch_device ) @cached_property def tokenizer(self): return AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps") def expected_summary(self): return [ "background : coronary artery disease ( cad ) is the emerging cause of morbidity and mortality in" " developing world . it provides an excellent resolution for visualization of the coronaryarteries for" " catheter - based or operating interventions . although the association of this technique with major" " complications such as mortality is highly uncommon , it is frequently associated with various cardiac" " and noncardiac complications.materials and methods : in aortic stenosis , we aimed to report the" " diagnostic performance of 128-slice computed tomography coronary angiogram in 50 patients undergoing for" " major noncoron ary cardiac surgery referred" ] @slow def test_summarization(self): model = self.model tok = self.tokenizer ARTICLE = """coronary artery disease ( cad ) is the emerging cause of morbidity and mortality in developing world . \n it provides an excellent resolution for visualization of the coronary arteries for catheter - based or operating interventions . \n although the association of this technique with major complications such as mortality is highly uncommon , it is frequently associated with various cardiac and noncardiac complications . computed tomography ( ct ) coronary angiography is a promising technique for the evaluation of cad noninvasively . \n it assesses disease within the coronary artery and provides qualitative and quantitative information about nonobstructive atherosclerotic plaque burden within the vessel wall . \n thus , ct angiography - based disease evaluation may provide clinically more significant information than conventional angiography . the introduction of multi - slice computed tomography ( msct ) technology such as 64-slice , 12 8-slice , 256-slice , and now 320-slice msct has produced a high diagnostic accuracy of ct coronary angiography . \n it has consistently showed to have a very high negative predictive value ( well above 90% ) in ruling out patients with s ignificant cad defined as coronary luminal stenosis of > 50% . \n the american college of cardiology / american heart association recommends that coronary angiography should be performed before valve surgery in men aged > 40 years , women aged > 35 years with coronary risk factors and in postmenopausal women . \n the prevalence of cad in patients undergoing valve replacement is 2040% in developed countries . in the previous studies , \n the incidence of angiographically p roven cad in acquired valvular diseases has been shown to vary widely from 9% to 41% . 
in aortic stenosis , \n we aimed to report the diagnostic performance of 128-slice ct coronary angiography in 50 patients undergoing for major noncoron ary cardiac surgery referred for diagnostic invasive coronary angiography to assess the extent and severity of coronary stenosis . \n during january 2013 to december 2014 , we enrolled fifty major noncoronary cardiac surgery patients sche duled for invasive coronary angiography who fulfilled the following inclusion criteria of age 40 years , having low or intermediate probability of cad , left ventricular ejection fraction ( lvef ) > 35% , and patient giving informed conse nt for undergoing msct and conventional coronary angiography . \n those having any contraindication for contrast injection , lvef < 35% , high pretest probability of cad , and hemodynamic instability were excluded from the study . \n pati ents with heart rates of > 70 bpm received ( unless they had known overt heart failure or electrocardiogram ( ecg ) atrioventricular conduction abnormalities ) a single oral dose of 100 mg metoprolol 45 min before the scan . \n patients w ith heart rates of > 80 bpm received an additional oral dose of metoprolol if not contraindicated . \n all patients were scanned with a 128-slice ct scanner ( siemens , somatom definition as ) equipped with a new feature in msct technolog y , so - called z - axis flying - focus technology . \n the central 32 detector rows acquire 0.6-mm slices , and the flying - focus spot switches back and forth between 2 z positions between each reading . \n two slices per detector row a re acquired , which results in a higher oversampling rate in the z - axis , thereby reducing artifacts related to the spiral acquisition and improving spatial resolution down to 0.4 mm . \n a bolus of 6580 ml contrast material ( omnipaque ) was injected through an arm vein at a flow rate of 5 ml / s . \n a bolus tracking technique was used to synchronize the arrival of contrast in the coronary arteries with the initiation of the scan . to monitor the arrival of contrast m aterial , \n axial scans were obtained at the level of the ascending aorta with a delay of 10 s after the start of the contrast injection . \n the scan was automatically started when a threshold of 150 hounsfield units was reached in a re gion of interest positioned in the ascending aorta . \n images were reconstructed with ecg gating to obtain optimal , motion - free image quality . \n all scans were performed within 2 weeks of the msct coronary diagnostic angiogram . a s ingle observer unaware of the multi - slice ct results identified coronary lesion as a single vessel , double vessel , or triple vessel disease . \n all lesion , regardless of size , were included for comparison with ct coronary angiograp hy . \n lesions were classified as having nonsignificant disease ( luminal irregularities or < 50% stenosis ) or as having significant stenosis . \n stenosis was evaluated in two orthogonal views and classified as significant if the mean lumen diameter reduction was 50% using a validated quantitative coronary angiography ( qca ) . \n all scans were analyzed independently by a radiologist and a cardiologist who were unaware of the results of conventional coronary angiograp hy . \n total calcium scores of all patients were calculated with dedicated software and expressed as agatston scores . 
\n the agatston score is a commonly used scoring method that calculates the total amount of calcium on the basis of th e number , areas , and peak hounsfield units of the detected calcified lesions . \n all available coronary segments were visually scored for the presence of > 50% considered as significant stenosis . \n maximum intensity projections were used to identify coronary lesions and ( curved ) multiplanar reconstructions to classify lesions as significant or nonsignificant . \n data were analyzed using statistical system spss version 20 software ( chicago , il , usa ) . \n the di agnostic performance of ct coronary angiography for the detection of significant lesions in coronary arteries with qca as the standard of reference is presented as sensitivity , specificity , positive and negative predictive values , and positive and negative likelihood ratios with the corresponding exact 95% of confidence interval ( cis ) . \n comparison between ct and conventional coronary angiography was performed on the two level vessel by vessel ( no or any disease p er vessel ) , and patient by patient ( no or any disease per patient ) . \n all scans were performed within 2 weeks of the msct coronary diagnostic angiogram . a single observer unaware of the multi - slice ct results identified coronary lesion as a single vessel , double vessel , or triple vessel disease . \n all lesion , regardless of size , were included for comparison with ct coronary angiography . \n lesions were classified as having nonsignificant disease ( luminal irregularities or < 50% stenosis ) or as having significant stenosis . \n stenosis was evaluated in two orthogonal views and classified as significant if the mean lumen diameter reduction was 50% using a validated quantitative coronary an giography ( qca ) . \n all scans were analyzed independently by a radiologist and a cardiologist who were unaware of the results of conventional coronary angiography . \n total calcium scores of all patients were calculated with dedicated software and expressed as agatston scores . \n the agatston score is a commonly used scoring method that calculates the total amount of calcium on the basis of the number , areas , and peak hounsfield units of the detected calcified lesi ons . \n all available coronary segments were visually scored for the presence of > 50% considered as significant stenosis . \n maximum intensity projections were used to identify coronary lesions and ( curved ) multiplanar reconstruction s to classify lesions as significant or nonsignificant . \n data were analyzed using statistical system spss version 20 software ( chicago , il , usa ) . \n the diagnostic performance of ct coronary angiography for the detection of signif icant lesions in coronary arteries with qca as the standard of reference is presented as sensitivity , specificity , positive and negative predictive values , and positive and negative likelihood ratios with the corresponding exact 95% of confidence interval ( cis ) . \n comparison between ct and conventional coronary angiography was performed on the two level vessel by vessel ( no or any disease per vessel ) , and patient by patient ( no or any disease per patient ) . \n in this study , 29 ( 58% ) subjects were female , and 21 ( 42% ) were male showing an average age of 50.36 8.39 years . 
\n of fifty patients 24 ( 48% ) , 13 ( 26% ) , eight ( 16% ) , and five ( 10% ) underwent mitral valve replacement , double valve replacement ( dvr ) , aortic valve replacement , and other surgeries , respectively . \n high distribution of cad risk factors such as hypertension ( 24% ) , smoking ( 22% ) , and dyslipidemia ( 18% ) was observed in the stu dy group . \n the mean creatinine level was 0.766 0.17 and average dye used in conventional angiography was 48.5 26.6 whereas for ct angiography it was 72.8 6.32 . \n average radiation dose in conventional coronary angiography and msct coronary angiography was 5.2 msv and 9.2 msv , respectively . \n the majority of the patients had sinus rhythm ( 68% ) , whereas atrial fibrillation was found in 32% of the subjects . \n patients included in the study had low to intermed iate probability of cad . in this study , three patients had complications after conventional angiography . \n complications were of local site hematoma , acute kidney injury managed conservatively , and acute heart failure . \n a patient who developed hematoma was obese female patients with body mass index > 30 kg / m . \n the patient suffered from pseudoaneurysm , had hospitalized for 9 days , which leads to increased morbidity and cost of hospital stay . \n the diagnos tic accuracy of ct coronary angiography was evaluated regarding true positive , true negative values and is presented in table 1 . the overall sensitivity and \n specificity of ct angiography technique was 100% ( 95% ci : 39.76%100% ) and 91.30% ( 95% ci : 79.21%97.58% ) , respectively [ table 2 ] . \n the positive predictive value ( 50% ; 95% ci : 15.70%84.30% ) and negative predictive value ( 100% ; 95% ci : 91.59%100% ) of ct angiography were also fairly high in these patients . \n recent reports from multiple studies demonstrated that recent - generation msct scanners showed promise for noninvasive detection of coronary stenosis however , until now no studies were found regarding the clinical efficacy or prognostic value of 128-slice ct coronary angiography versus conventional invasive coronary angiography in the diagnosis of patients planned for major noncoronary surgeries such as dvr , bentall , atrial septal defect closure , etc . in our study , we reported 8% cad prevalence in patients planned for major noncoronary cardiac surgery . \n we performed conventional and msct coronary angiography in all patients and the results showed that ct coronary angiography with i nvasive coronary angiography as the reference standard had a considerably high sensitivity ( 100% ) and specificity ( 95.65% ) . \n the health economic model using invasive coronary angiography as the reference standard showed that at a p retest probability of cad of 70% or lower , ct coronary angiography resulted in lower cost per patient with a true positive diagnosis . at a pretest probability of cad of 70% or higher , invasive coronary angiography was associated with a lower cost per patient with a true positive diagnosis . in our study population , \n two patients developed local site complications in the form of hematoma and pseudoaneurysm after conventional angiography . \n hence , msct coronary ang iography will be more favorable in female obese patients with intermediate likelihood of cad . \n hence , msct coronary angiography will be cost - effective in patients of valvular heart diseases . 
\n however , ct angiography suffers from a drawback that average amount of dye used in msct coronary angiography were 72.8 6.32 ml which is higher than average amount of dye required for conventional angiography ( 48.6 26.6 ml ) . \n hence , the use of ct coronary angiography could not be used in patients with known renal dysfunction , where reduction of contrast dye load is highly advocated . \n our results show that 128-slice ct coronary angiography is a reliable technique to detect coronary stenosis in pat ients planned for noncoronary cardiac surgery . \n although there has been important technological progress in the development of ct coronary angiography , its clinical application remains limited . \n a study wth large numbers of patient s is required for the recommendation of only ct coronary angiography for the coronary evaluation in major non - cardiac surgeries . \n mehta institute of cardiology and research center ( affiliated to bj medical college , ahmedabad , guja rat , india ) . \n u.n . mehta institute of cardiology and research center ( affiliated to bj medical college , ahmedabad , gujarat , india ) . \n """ dct = tok( [ARTICLE], max_length=1024, padding="max_length", truncation=True, return_tensors="pt", ).to(torch_device) hypotheses_batch = model.generate( **dct, num_beams=4, length_penalty=2.0, max_length=142, min_length=56, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertListEqual( self.expected_summary(), decoded, ) @slow def test_inference_hidden_states(self): model = self.model input_ids = torch.tensor( [[100, 19, 3, 9, 7142, 1200, 145, 8, 1252, 14145, 2034, 812, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=torch.long, device=torch_device, ) decoder_input_ids = torch.tensor( [[100, 19, 3, 9, 7142, 1200, 145, 8, 1252, 14145, 2034, 812, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=torch.long, device=torch_device, ) attention_mask = torch.tensor( [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=torch.long, device=torch_device, ) output = model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, output_hidden_states=True ) # check if encoder_outputs match expected_output_slice = torch.tensor([0.0629, -0.1294, -0.0089, 0.0772, 0.0663], device=torch_device) self.assertTrue(torch.allclose(output.encoder_hidden_states[-1][0, 0, :5], expected_output_slice, atol=1e-4)) # check if logits match expected_output_slice = torch.tensor([5.5231, 6.1058, 3.1766, 8.2391, -5.9453], device=torch_device) self.assertTrue(torch.allclose(output.logits[0, 0, :5], expected_output_slice, atol=1e-4))
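# Editor's note (added, not part of the original suite): the integration checks above all follow the
# same pattern of comparing a small, deterministic slice of the model output against hard-coded
# expectations at a loose tolerance. A minimal sketch of that pattern as a standalone helper; the
# name `_assert_slice_close` is invented here purely for illustration.
def _assert_slice_close(actual, expected, atol=1e-4):
    """Compare the leading elements of `actual` against `expected` at tolerance `atol`."""
    # flatten and keep only as many leading elements as the expectation provides
    actual_slice = actual.reshape(-1)[: expected.numel()]
    assert torch.allclose(actual_slice, expected.to(actual_slice.device), atol=atol)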
transformers/tests/models/longt5/test_modeling_longt5.py/0
{ "file_path": "transformers/tests/models/longt5/test_modeling_longt5.py", "repo_id": "transformers", "token_count": 30001 }
440
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import timeout_decorator # noqa from transformers import MarianConfig, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests often fail with an OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform" import jax import jax.numpy as jnp from transformers import MarianTokenizer from transformers.models.marian.modeling_flax_marian import FlaxMarianModel, FlaxMarianMTModel, shift_tokens_right def prepare_marian_inputs_dict( config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = np.where(input_ids != config.pad_token_id, 1, 0) if decoder_attention_mask is None: decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0) if head_mask is None: head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } class FlaxMarianModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.initializer_range = initializer_range def 
prepare_config_and_inputs(self): input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size) input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1) decoder_input_ids = shift_tokens_right(input_ids, 1, 2) config = MarianConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, ) inputs_dict = prepare_marian_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def check_use_cache_forward(self, model_class_name, config, inputs_dict): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(inputs_dict["input_ids"]) decoder_input_ids, decoder_attention_mask = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4") decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, ) decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, ) outputs = model.decode(decoder_input_ids, encoder_outputs) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(inputs_dict["input_ids"]) decoder_input_ids, decoder_attention_mask = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) decoder_attention_mask_cache = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ], axis=-1, ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, ) 
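# Editor's note (added for clarity, not part of the original suite): this is a two-phase cache check.
# The decode call above primes the pre-allocated cache with every token except the last; the
# single-token decode below then reuses `outputs_cache.past_key_values`, and its last-position
# logits must agree with a full, uncached decode to within 1e-3. Position ids are passed explicitly
# because the cache is sized to `max_decoder_length`, so positions can no longer be inferred from
# the input shape alone.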
decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, ) outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") @require_flax class FlaxMarianModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin): is_encoder_decoder = True all_model_classes = (FlaxMarianModel, FlaxMarianMTModel) if is_flax_available() else () all_generative_model_classes = (FlaxMarianMTModel,) if is_flax_available() else () def setUp(self): self.model_tester = FlaxMarianModelTester(self) def test_use_cache_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(model_class, config, inputs_dict) def test_use_cache_forward_with_attn_mask(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict) def test_encode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def encode_jitted(input_ids, attention_mask=None, **kwargs): return model.encode(input_ids=input_ids, attention_mask=attention_mask) with self.subTest("JIT Enabled"): jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = encode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_decode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): model = model_class(config) encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"]) prepared_inputs_dict = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs): return model.decode( decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, ) with self.subTest("JIT Enabled"): jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = decode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("Helsinki-NLP/opus-mt-en-de") # FlaxMarianForSequenceClassification expects eos token in 
input_ids input_ids = np.ones((1, 1)) * model.config.eos_token_id outputs = model(input_ids) self.assertIsNotNone(outputs) @require_flax @require_sentencepiece @require_tokenizers class MarianIntegrationTest(unittest.TestCase): src = None tgt = None @classmethod def setUpClass(cls) -> None: cls.model_name = f"Helsinki-NLP/opus-mt-{cls.src}-{cls.tgt}" return cls @cached_property def tokenizer(self): return MarianTokenizer.from_pretrained(self.model_name) @property def eos_token_id(self) -> int: return self.tokenizer.eos_token_id @cached_property def model(self): model: FlaxMarianMTModel = FlaxMarianMTModel.from_pretrained(self.model_name) self.assertEqual(model.config.decoder_start_token_id, model.config.pad_token_id) return model def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs): generated_words = self.translate_src_text(**tokenizer_kwargs) self.assertListEqual(self.expected_text, generated_words) def translate_src_text(self, **tokenizer_kwargs): model_inputs = self.tokenizer(self.src_text, padding=True, return_tensors="np", **tokenizer_kwargs) generated_ids = self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, max_length=128, ).sequences generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True) return generated_words @require_flax @require_sentencepiece @require_tokenizers class TestMarian_EN_FR(MarianIntegrationTest): src = "en" tgt = "fr" src_text = [ "I am a small frog.", "Now I can forget the 100 words of german that I know.", ] expected_text = [ "Je suis une petite grenouille.", "Maintenant, je peux oublier les 100 mots d'allemand que je connais.", ] @slow def test_batch_generation_en_fr(self): self._assert_generated_batch_equal_expected() @require_flax @require_sentencepiece @require_tokenizers class TestMarian_FR_EN(MarianIntegrationTest): src = "fr" tgt = "en" src_text = [ "Donnez moi le micro.", "Tom et Mary étaient assis à une table.", # Accents ] expected_text = [ "Give me the microphone.", "Tom and Mary were sitting at a table.", ] @slow def test_batch_generation_fr_en(self): self._assert_generated_batch_equal_expected() @require_flax @require_sentencepiece @require_tokenizers class TestMarian_MT_EN(MarianIntegrationTest): """Cover low resource/high perplexity setting. 
This breaks without adjust_logits_generation overwritten""" src = "mt" tgt = "en" src_text = ["Billi messu b'mod ġentili, Ġesù fejjaq raġel li kien milqut bil - marda kerha tal - ġdiem."] expected_text = ["Touching gently, Jesus healed a man who was affected by the sad disease of leprosy."] @slow def test_batch_generation_mt_en(self): self._assert_generated_batch_equal_expected() @require_flax @require_sentencepiece @require_tokenizers class TestMarian_EN_DE(MarianIntegrationTest): src = "en" tgt = "de" src_text = [ "I am a small frog.", "Now I can forget the 100 words of german that I know.", "Tom asked his teacher for advice.", "That's how I would do it.", "Tom really admired Mary's courage.", "Turn around and close your eyes.", ] expected_text = [ "Ich bin ein kleiner Frosch.", "Jetzt kann ich die 100 Wörter des Deutschen vergessen, die ich kenne.", "Tom bat seinen Lehrer um Rat.", "So würde ich das machen.", "Tom bewunderte Marias Mut wirklich.", "Drehen Sie sich um und schließen Sie die Augen.", ] @slow def test_batch_generation_en_de(self): self._assert_generated_batch_equal_expected() @require_flax @require_sentencepiece @require_tokenizers class TestMarian_en_zh(MarianIntegrationTest): src = "en" tgt = "zh" src_text = ["My name is Wolfgang and I live in Berlin"] expected_text = ["我叫沃尔夫冈 我住在柏林"] @slow def test_batch_generation_eng_zho(self): self._assert_generated_batch_equal_expected() @require_flax @require_sentencepiece @require_tokenizers class TestMarian_RU_FR(MarianIntegrationTest): src = "ru" tgt = "fr" src_text = ["Он показал мне рукопись своей новой пьесы."] expected_text = ["Il m'a montré le manuscrit de sa nouvelle pièce."] @slow def test_batch_generation_ru_fr(self): self._assert_generated_batch_equal_expected() @require_flax @require_sentencepiece @require_tokenizers class TestMarian_en_ROMANCE(MarianIntegrationTest): """Multilingual on target side.""" src = "en" tgt = "ROMANCE" src_text = [ ">>fr<< Don't spend so much time watching TV.", ">>pt<< Your message has been sent.", ">>es<< He's two years older than me.", ] expected_text = [ "Ne passez pas autant de temps à regarder la télé.", "A sua mensagem foi enviada.", "Es dos años más viejo que yo.", ] @slow def test_batch_generation_en_ROMANCE_multi(self): self._assert_generated_batch_equal_expected()
transformers/tests/models/marian/test_modeling_flax_marian.py/0
{ "file_path": "transformers/tests/models/marian/test_modeling_flax_marian.py", "repo_id": "transformers", "token_count": 8242 }
441
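For readers skimming this record, the generation path that MarianIntegrationTest exercises can be reduced to the standalone sketch below. It only reuses names that appear in the test above (FlaxMarianMTModel, MarianTokenizer, the Helsinki-NLP checkpoint naming, num_beams=2, max_length=128); treat it as an illustration of the tested call pattern, not as additional test code.

from transformers import FlaxMarianMTModel, MarianTokenizer

# Checkpoint naming follows MarianIntegrationTest: f"Helsinki-NLP/opus-mt-{src}-{tgt}".
model_name = "Helsinki-NLP/opus-mt-en-fr"
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = FlaxMarianMTModel.from_pretrained(model_name)

# Same generate() arguments as translate_src_text() in the test class.
inputs = tokenizer(["I am a small frog."], padding=True, return_tensors="np")
generated_ids = model.generate(
    inputs.input_ids,
    attention_mask=inputs.attention_mask,
    num_beams=2,
    max_length=128,
).sequences
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))
# Expected output (per TestMarian_EN_FR): ["Je suis une petite grenouille."]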
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch MobileNetV2 model.""" import unittest from transformers import MobileNetV2Config from transformers.testing_utils import is_flaky, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model if is_vision_available(): from PIL import Image from transformers import MobileNetV2ImageProcessor class MobileNetV2ConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "tf_padding")) self.parent.assertTrue(hasattr(config, "depth_multiplier")) class MobileNetV2ModelTester: def __init__( self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, tf_padding=True, hidden_act="relu6", last_hidden_size=1280, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.depth_multiplier = depth_multiplier self.depth_divisible_by = depth_divisible_by self.min_depth = min_depth self.expand_ratio = expand_ratio self.tf_padding = tf_padding self.output_stride = output_stride self.first_layer_is_expansion = first_layer_is_expansion self.finegrained_output = finegrained_output self.hidden_act = hidden_act self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier) self.classifier_dropout_prob = classifier_dropout_prob self.use_labels = use_labels self.is_training = is_training self.num_labels = num_labels self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels, pixel_labels def get_config(self): return MobileNetV2Config( num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, 
first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels, pixel_labels): model = MobileNetV2Model(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) self.parent.assertEqual( result.pooler_output.shape, (self.batch_size, self.last_hidden_size), ) def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = MobileNetV2ForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = MobileNetV2ForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) result = model(pixel_values, labels=pixel_labels) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels, pixel_labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as MobileNetV2 does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation) if is_torch_available() else () ) pipeline_model_mapping = ( { "image-feature-extraction": MobileNetV2Model, "image-classification": MobileNetV2ForImageClassification, "image-segmentation": MobileNetV2ForSemanticSegmentation, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = MobileNetV2ModelTester(self) self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="MobileNetV2 does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="MobileNetV2 does not support input and output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="MobileNetV2 does not output attentions") def test_attention_outputs(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_stages = 16 self.assertEqual(len(hidden_states), expected_num_stages) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "google/mobilenet_v2_1.4_224" model = MobileNetV2Model.from_pretrained(model_name) self.assertIsNotNone(model) @is_flaky(description="is_flaky https://github.com/huggingface/transformers/issues/29516") def test_batching_equivalence(self): super().test_batching_equivalence() # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class MobileNetV2ModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1001)) 
self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow def test_inference_semantic_segmentation(self): model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513") model = model.to(torch_device) image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513") image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 21, 65, 65)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor( [ [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]], [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]], [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]], ], device=torch_device, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
transformers/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py/0
{ "file_path": "transformers/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py", "repo_id": "transformers", "token_count": 5524 }
442
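The MobileNetV2 integration test above boils down to a standard image-classification forward pass. The sketch below restates it outside the unittest harness; the checkpoint name, fixture path, and the (1, 1001) logit shape are all taken from the test itself.

import torch
from PIL import Image
from transformers import MobileNetV2ForImageClassification, MobileNetV2ImageProcessor

processor = MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")

# Same fixture image used by prepare_img() in the test file.
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1001) for this checkpoint, as asserted above

predicted_class = logits.argmax(-1).item()
print(model.config.id2label[predicted_class])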
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """
        For comparison run:
        >>> import t5  # pip install t5==0.7.1
        >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary

        >>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'
        >>> path_to_mtf_small_mt5_spm_model_path = '<fill_in>'
        >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_mt5_checkpoint, batch_size=1, tpu=None)
        >>> vocab = SentencePieceVocabulary(path_to_mtf_small_mt5_spm_model_path)
        >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
        """

        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
transformers/tests/models/mt5/test_modeling_flax_mt5.py/0
{ "file_path": "transformers/tests/models/mt5/test_modeling_flax_mt5.py", "repo_id": "transformers", "token_count": 950 }
443
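One step of the MT5 test that is easy to misread is the decoder-input preparation. The shift_tokens_right helper used above shifts the label ids one position to the right and prepends the decoder start token (the library helper additionally maps any -100 labels back to the pad id). The toy ids below are hypothetical placeholders chosen purely for illustration, not the real mt5-small token ids.

import numpy as np

labels = np.array([[259, 1139, 339, 1]])  # hypothetical target ids ending in </s>
decoder_start_token_id = 0  # placeholder; the test reads the real value from model.config

decoder_input_ids = np.zeros_like(labels)
decoder_input_ids[:, 1:] = labels[:, :-1]
decoder_input_ids[:, 0] = decoder_start_token_id
# -> [[0, 259, 1139, 339]]: the decoder predicts label t from labels < t,
#    which is what the cross-entropy against `labels` in the test assumes.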
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.models.nllb.tokenization_nllb import FAIRSEQ_LANGUAGE_CODES from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right EN_CODE = 256047 RO_CODE = 256145 @require_sentencepiece @require_tokenizers class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "facebook/nllb-200-distilled-600M" tokenizer_class = NllbTokenizer rust_tokenizer_class = NllbTokenizerFast test_rust_tokenizer = True test_sentencepiece = True from_pretrained_kwargs = {} def setUp(self): super().setUp() # We have a SentencePiece fixture for testing tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True) tokenizer.save_pretrained(self.tmpdirname) def test_full_tokenizer(self): tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) # overwrite from test_tokenization_common to speed up test def test_save_pretrained(self): self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = 
self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f) self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files) # Checks everything loads correctly in the same way tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) shutil.rmtree(tmpdirname2) # Save tokenizer rust, legacy_format=True tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) # Checks it save with the same files self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files) # Checks everything loads correctly in the same way tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) shutil.rmtree(tmpdirname2) # Save tokenizer rust, legacy_format=False tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) # Checks everything loads correctly in the same way tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) shutil.rmtree(tmpdirname2) @require_torch def test_prepare_seq2seq_batch(self): if not self.test_seq2seq: self.skipTest(reason="test_seq2seq is set to False") tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Longer text that will definitely require truncation. 
src_text = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for" " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons" " will only worsen the violence and misery for millions of people.", ] tgt_text = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al" ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi' " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] try: batch = tokenizer.prepare_seq2seq_batch( src_texts=src_text, tgt_texts=tgt_text, max_length=3, max_target_length=10, return_tensors="pt", src_lang="eng_Latn", tgt_lang="ron_Latn", ) except NotImplementedError: self.skipTest(reason="Encountered NotImplementedError when calling prepare_seq2seq_batch") self.assertEqual(batch.input_ids.shape[1], 3) self.assertEqual(batch.labels.shape[1], 10) # max_target_length will default to max_length if not specified batch = tokenizer.prepare_seq2seq_batch( src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt" ) self.assertEqual(batch.input_ids.shape[1], 3) self.assertEqual(batch.labels.shape[1], 3) batch_encoder_only = tokenizer.prepare_seq2seq_batch( src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt" ) self.assertEqual(batch_encoder_only.input_ids.shape[1], 3) self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3) self.assertNotIn("decoder_input_ids", batch_encoder_only) @unittest.skip(reason="Unfortunately way too slow to build a BPE with SentencePiece.") def test_save_slow_from_fast_and_reload_fast(self): pass def test_special_tokens_initialization(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): added_tokens = [AddedToken("<special>", lstrip=True)] tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) r_output = tokenizer_r.encode("Hey this is a <special> token") special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0] self.assertTrue(special_token_id in r_output) if self.test_slow_tokenizer: tokenizer_cr = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs, # , from_slow=True <- unfortunately too slow to convert ) tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) p_output = tokenizer_p.encode("Hey this is a <special> token") cr_output = tokenizer_cr.encode("Hey this is a <special> token") self.assertEqual(p_output, r_output) self.assertEqual(cr_output, r_output) self.assertTrue(special_token_id in p_output) self.assertTrue(special_token_id in cr_output) @unittest.skip(reason="Need to fix this after #26538") def test_training_new_tokenizer(self): pass def test_new_language_codes(self): code1, code2 = "myv_Cyrl", "myv_Latn" new_codes = FAIRSEQ_LANGUAGE_CODES + [code1, code2] # here I create a tokenizer with the default behaviour tok1 = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M") # here I enhance the model's vocabulary with two new language codes tok2 = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", additional_special_tokens=new_codes) # testing that the 
new codes can work self.assertEqual(len(tok2), len(tok1) + 2) tok2.tgt_lang = code1 tok2.src_lang = code2 self.assertEqual(tok2("šumbrat!").input_ids[0], tok2.convert_tokens_to_ids(code2)) with tempfile.TemporaryDirectory() as tempdir: # testing that saving and loading the tokenizer preserves the new behaviour tok2.save_pretrained(tempdir) tok3 = NllbTokenizer.from_pretrained(tempdir) self.assertEqual(tok2.get_vocab(), tok3.get_vocab()) tok3.src_lang = code2 self.assertEqual(tok3("šumbrat!").input_ids[0], tok3.convert_tokens_to_ids(code2)) # testing that saving and loading the tokenizer preserves the new behaviour tok2.save_pretrained(tempdir) tok3 = NllbTokenizer(f"{tempdir}/sentencepiece.bpe.model", additional_special_tokens=None) self.assertEqual(len(tok3), 256204) # legacy tok4 = NllbTokenizer(f"{tempdir}/sentencepiece.bpe.model", additional_special_tokens=[]) self.assertEqual(len(tok4), 256002) tok5 = NllbTokenizer(f"{tempdir}/sentencepiece.bpe.model", additional_special_tokens=[code1, code2]) self.assertEqual(len(tok5), 256004) @require_torch @require_sentencepiece @require_tokenizers class NllbDistilledIntegrationTest(unittest.TestCase): checkpoint_name = "facebook/nllb-200-distilled-600M" src_text = [ " UN Chief Says There Is No Military Solution in Syria", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""", ] tgt_text = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] expected_src_tokens = [ 256047, 16297, 134408, 8165, 248066, 14734, 950, 1135, 105721, 3573, 83, 27352, 108, 49486, 2, ] @classmethod def setUpClass(cls): cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained( cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn" ) cls.pad_token_id = 1 return cls def test_enro_tokenizer_batch_encode_plus(self): ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens, ids) def test_enro_tokenizer_decode_ignores_language_codes(self): self.assertIn(RO_CODE, self.tokenizer.all_special_ids) generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047] # fmt: skip result = self.tokenizer.decode(generated_ids, skip_special_tokens=True) expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True) self.assertEqual(result, expected_romanian) self.assertNotIn(self.tokenizer.eos_token, result) def test_enro_tokenizer_truncation(self): src_text = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0], str) desired_max_length = 10 ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0] self.assertEqual(ids[-1], 2) self.assertEqual(ids[0], EN_CODE) self.assertEqual(len(ids), desired_max_length) def test_mask_token(self): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3]) @require_torch def test_enro_tokenizer_prepare_batch(self): batch = self.tokenizer( self.src_text, text_target=self.tgt_text, padding=True, truncation=True, 
max_length=len(self.expected_src_tokens), return_tensors="pt", ) batch["decoder_input_ids"] = shift_tokens_right( batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.convert_tokens_to_ids("ron_Latn") ) self.assertIsInstance(batch, BatchEncoding) self.assertEqual((2, 15), batch.input_ids.shape) self.assertEqual((2, 15), batch.attention_mask.shape) result = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens, result) self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE]) self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id]) def test_seq2seq_max_length(self): batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt") targets = self.tokenizer( text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt" ) labels = targets["input_ids"] batch["decoder_input_ids"] = shift_tokens_right( labels, self.tokenizer.pad_token_id, decoder_start_token_id=self.tokenizer.convert_tokens_to_ids(self.tokenizer.tgt_lang), ) self.assertEqual(batch.input_ids.shape[1], 3) self.assertEqual(batch.decoder_input_ids.shape[1], 10) @require_torch def test_tokenizer_translation(self): inputs = self.tokenizer._build_translation_inputs( "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn" ) self.assertEqual( nested_simplify(inputs), { # A, test, EOS, en_XX "input_ids": [[256047, 70, 7356, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 256057, }, ) @require_torch def test_legacy_behaviour(self): self.tokenizer.legacy_behaviour = True inputs = self.tokenizer( "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn" ) self.assertEqual( inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047] ) self.tokenizer.legacy_behaviour = False inputs = self.tokenizer( "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn" ) self.assertEqual( inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2] )
transformers/tests/models/nllb/test_tokenization_nllb.py/0
{ "file_path": "transformers/tests/models/nllb/test_tokenization_nllb.py", "repo_id": "transformers", "token_count": 9740 }
444
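The NLLB tokenizer tests above all revolve around how the source and target language codes are injected. In the current (non-legacy) behaviour the language code is the first token of input_ids and </s> is the last, which is exactly what expected_src_tokens and test_legacy_behaviour assert. A minimal sketch, reusing only names and ids that appear in the tests:

from transformers import NllbTokenizer

tokenizer = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
)

batch = tokenizer(
    ["UN Chief Says There Is No Military Solution in Syria"],
    text_target=["Şeful ONU declară că nu există o soluţie militară în Siria"],
    padding=True,
    return_tensors="pt",
)

print(batch.input_ids[0, 0].item())   # 256047 -> eng_Latn (EN_CODE in the tests)
print(batch.input_ids[0, -1].item())  # 2 -> </s>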
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import OpenAIGPTConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.openai.modeling_tf_openai import ( TFOpenAIGPTDoubleHeadsModel, TFOpenAIGPTForSequenceClassification, TFOpenAIGPTLMHeadModel, TFOpenAIGPTModel, ) class TFOpenAIGPTModelTester: def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_token_type_ids = True self.use_input_mask = True self.use_labels = True self.use_mc_token_ids = True self.vocab_size = 99 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None self.pad_token_id = self.vocab_size - 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = OpenAIGPTConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, # intermediate_size=self.intermediate_size, # hidden_act=self.hidden_act, # hidden_dropout_prob=self.hidden_dropout_prob, # attention_probs_dropout_prob=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, # type_vocab_size=self.type_vocab_size, # initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, ) head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def create_and_check_openai_gpt_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = TFOpenAIGPTModel(config=config) inputs 
= {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_openai_gpt_lm_head(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = TFOpenAIGPTLMHeadModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_openai_gpt_double_head( self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args ): model = TFOpenAIGPTDoubleHeadsModel(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "mc_token_ids": mc_token_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_choices, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.mc_logits.shape, (self.batch_size, self.num_choices)) def create_and_check_openai_gpt_for_sequence_classification( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): config.num_labels = self.num_labels sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "labels": sequence_labels, } model = TFOpenAIGPTForSequenceClassification(config) result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFOpenAIGPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (TFOpenAIGPTModel, TFOpenAIGPTLMHeadModel, TFOpenAIGPTDoubleHeadsModel, TFOpenAIGPTForSequenceClassification) if is_tf_available() else () ) all_generative_model_classes = ( (TFOpenAIGPTLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly pipeline_model_mapping = ( { "feature-extraction": TFOpenAIGPTModel, "text-classification": TFOpenAIGPTForSequenceClassification, "text-generation": TFOpenAIGPTLMHeadModel, "zero-shot": TFOpenAIGPTForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. 
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def setUp(self): self.model_tester = TFOpenAIGPTModelTester(self) self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_openai_gpt_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs) def test_openai_gpt_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_lm_head(*config_and_inputs) def test_openai_gpt_double_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_double_head(*config_and_inputs) def test_openai_gpt_sequence_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "openai-community/openai-gpt" model = TFOpenAIGPTModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_tf class TFOPENAIGPTModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_openai_gpt(self): model = TFOpenAIGPTLMHeadModel.from_pretrained("openai-community/openai-gpt") input_ids = tf.convert_to_tensor([[481, 4735, 544]], dtype=tf.int32) # the president is expected_output_ids = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the output_ids = model.generate(input_ids, do_sample=False) self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
transformers/tests/models/openai/test_modeling_tf_openai.py/0
{ "file_path": "transformers/tests/models/openai/test_modeling_tf_openai.py", "repo_id": "transformers", "token_count": 5003 }
445
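The single generation test in this record exercises greedy decoding from a hand-built prompt. The sketch below shows the same call outside the test harness; the prompt ids, checkpoint name, and do_sample=False come from the test, while decoding the result back to text with the matching tokenizer is added here for readability.

import tensorflow as tf
from transformers import AutoTokenizer, TFOpenAIGPTLMHeadModel

tokenizer = AutoTokenizer.from_pretrained("openai-community/openai-gpt")
model = TFOpenAIGPTLMHeadModel.from_pretrained("openai-community/openai-gpt")

input_ids = tf.convert_to_tensor([[481, 4735, 544]], dtype=tf.int32)  # "the president is"
output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding, as in the test
print(tokenizer.decode(output_ids[0]))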
# coding=utf-8 # Copyright 2023 IBM and HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch PatchTSMixer model.""" import inspect import itertools import random import tempfile import unittest from typing import Dict, List, Optional, Tuple, Union import numpy as np from huggingface_hub import hf_hub_download from parameterized import parameterized from transformers import is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin TOLERANCE = 1e-4 if is_torch_available(): import torch from transformers import ( MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING, MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING, PatchTSMixerConfig, PatchTSMixerForPrediction, PatchTSMixerForPretraining, PatchTSMixerForRegression, PatchTSMixerForTimeSeriesClassification, PatchTSMixerModel, ) from transformers.models.patchtsmixer.modeling_patchtsmixer import ( PatchTSMixerEncoder, PatchTSMixerForPredictionHead, PatchTSMixerForPredictionOutput, PatchTSMixerForRegressionOutput, PatchTSMixerForTimeSeriesClassificationOutput, PatchTSMixerLinearHead, PatchTSMixerPretrainHead, ) @require_torch class PatchTSMixerModelTester: def __init__( self, context_length: int = 32, patch_length: int = 8, num_input_channels: int = 3, patch_stride: int = 8, # d_model: int = 128, hidden_size: int = 8, # num_layers: int = 8, num_hidden_layers: int = 2, expansion_factor: int = 2, dropout: float = 0.5, mode: str = "common_channel", gated_attn: bool = True, norm_mlp="LayerNorm", swin_hier: int = 0, # masking related mask_type: str = "forecast", random_mask_ratio=0.5, mask_patches: list = [2, 3], forecast_mask_ratios: list = [1, 1], mask_value=0, masked_loss: bool = False, mask_mode: str = "mask_before_encoder", channel_consistent_masking: bool = True, scaling: Optional[Union[str, bool]] = "std", # Head related head_dropout: float = 0.2, # forecast related prediction_length: int = 16, out_channels: int = None, # Classification/regression related # num_labels: int = 3, num_targets: int = 3, output_range: list = None, head_aggregation: str = None, # Trainer related batch_size=13, is_training=True, seed_number=42, post_init=True, num_parallel_samples=4, ): self.num_input_channels = num_input_channels self.context_length = context_length self.patch_length = patch_length self.patch_stride = patch_stride # self.d_model = d_model self.hidden_size = hidden_size self.expansion_factor = expansion_factor # self.num_layers = num_layers self.num_hidden_layers = num_hidden_layers self.dropout = dropout self.mode = mode self.gated_attn = gated_attn self.norm_mlp = norm_mlp self.swin_hier = swin_hier self.scaling = scaling self.head_dropout = head_dropout # masking related self.mask_type = mask_type self.random_mask_ratio = 
random_mask_ratio self.mask_patches = mask_patches self.forecast_mask_ratios = forecast_mask_ratios self.mask_value = mask_value self.channel_consistent_masking = channel_consistent_masking self.mask_mode = mask_mode self.masked_loss = masked_loss # patching related self.patch_last = True # forecast related self.prediction_length = prediction_length self.out_channels = out_channels # classification/regression related # self.num_labels = num_labels self.num_targets = num_targets self.output_range = output_range self.head_aggregation = head_aggregation # Trainer related self.batch_size = batch_size self.is_training = is_training self.seed_number = seed_number self.post_init = post_init self.num_parallel_samples = num_parallel_samples def get_config(self): config_ = PatchTSMixerConfig( num_input_channels=self.num_input_channels, context_length=self.context_length, patch_length=self.patch_length, patch_stride=self.patch_stride, # d_model = self.d_model, d_model=self.hidden_size, expansion_factor=self.expansion_factor, # num_layers = self.num_layers, num_layers=self.num_hidden_layers, dropout=self.dropout, mode=self.mode, gated_attn=self.gated_attn, norm_mlp=self.norm_mlp, swin_hier=self.swin_hier, scaling=self.scaling, head_dropout=self.head_dropout, mask_type=self.mask_type, random_mask_ratio=self.random_mask_ratio, mask_patches=self.mask_patches, forecast_mask_ratios=self.forecast_mask_ratios, mask_value=self.mask_value, channel_consistent_masking=self.channel_consistent_masking, mask_mode=self.mask_mode, masked_loss=self.masked_loss, prediction_length=self.prediction_length, out_channels=self.out_channels, # num_labels=self.num_labels, num_targets=self.num_targets, output_range=self.output_range, head_aggregation=self.head_aggregation, post_init=self.post_init, ) self.num_patches = config_.num_patches return config_ def prepare_patchtsmixer_inputs_dict(self, config): _past_length = config.context_length # bs, n_vars, num_patch, patch_length # [bs x context_length x n_vars] past_values = floats_tensor([self.batch_size, _past_length, self.num_input_channels]) inputs_dict = { "past_values": past_values, } return inputs_dict def prepare_config_and_inputs(self): config = self.get_config() inputs_dict = self.prepare_patchtsmixer_inputs_dict(config) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict @require_torch class PatchTSMixerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( PatchTSMixerModel, PatchTSMixerForPrediction, PatchTSMixerForPretraining, PatchTSMixerForTimeSeriesClassification, PatchTSMixerForRegression, ) if is_torch_available() else () ) all_generative_model_classes = ( (PatchTSMixerForPrediction, PatchTSMixerForPretraining) if is_torch_available() else () ) pipeline_model_mapping = {"feature-extraction": PatchTSMixerModel} if is_torch_available() else {} is_encoder_decoder = False test_pruning = False test_head_masking = False test_missing_keys = False test_torchscript = False test_inputs_embeds = False test_resize_embeddings = True test_resize_position_embeddings = False test_mismatched_shapes = True test_model_parallel = False has_attentions = False def setUp(self): self.model_tester = PatchTSMixerModelTester() self.config_tester = ConfigTester( self, config_class=PatchTSMixerConfig, has_text_modality=False, prediction_length=self.model_tester.prediction_length, common_properties=["hidden_size", "expansion_factor", 
"num_hidden_layers"], ) def test_config(self): self.config_tester.run_common_tests() def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if model_class == PatchTSMixerForPrediction: rng = random.Random(self.model_tester.seed_number) labels = floats_tensor( [ self.model_tester.batch_size, self.model_tester.prediction_length, self.model_tester.num_input_channels, ], rng=rng, ) inputs_dict["future_values"] = labels elif model_class in get_values(MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING): rng = random.Random(self.model_tester.seed_number) labels = ids_tensor([self.model_tester.batch_size], self.model_tester.num_targets, rng=rng) inputs_dict["target_values"] = labels elif model_class in get_values(MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING): rng = random.Random(self.model_tester.seed_number) labels = floats_tensor([self.model_tester.batch_size, self.model_tester.num_targets], rng=rng) inputs_dict["target_values"] = labels inputs_dict["output_hidden_states"] = True return inputs_dict def test_save_load_strict(self): config, _ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers, ) self.assertEqual(len(hidden_states), expected_num_layers) expected_hidden_size = self.model_tester.hidden_size self.assertEqual(hidden_states[0].shape[-1], expected_hidden_size) num_patch = self.model_tester.num_patches self.assertListEqual( list(hidden_states[0].shape[-2:]), [num_patch, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: check_hidden_states_output(inputs_dict, config, model_class) @unittest.skip(reason="No tokens embeddings") def test_resize_tokens_embeddings(self): pass def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) output_ = model(**dict_inputs, return_dict=True, **additional_kwargs) attributes_ = vars(output_) dict_output = tuple(attributes_.values()) def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( 
torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5, ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." ), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: print(model_class) model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) tuple_inputs.update({"output_hidden_states": False}) dict_inputs.update({"output_hidden_states": False}) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) tuple_inputs.update({"output_hidden_states": False}) dict_inputs.update({"output_hidden_states": False}) check_equivalence( model, tuple_inputs, dict_inputs, ) def test_model_main_input_name(self): model_signature = inspect.signature(getattr(PatchTSMixerModel, "forward")) # The main input is the name of the argument after `self` observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(PatchTSMixerModel.main_input_name, observed_main_input_name) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model_class == PatchTSMixerForPretraining: expected_arg_names = [ "past_values", "observed_mask", "output_hidden_states", "return_loss", ] elif model_class == PatchTSMixerModel: expected_arg_names = [ "past_values", "observed_mask", "output_hidden_states", ] elif model_class in get_values(MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING) or model_class in get_values( MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING ): expected_arg_names = [ "past_values", "target_values", "output_hidden_states", "return_loss", ] else: # PatchTSMixerForPrediction expected_arg_names = [ "past_values", "observed_mask", "future_values", "output_hidden_states", "return_loss", ] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) @is_flaky() def test_retain_grad_hidden_states_attentions(self): super().test_retain_grad_hidden_states_attentions() @unittest.skip(reason="Model does not have input embeddings") def test_model_get_set_embeddings(self): pass def prepare_batch(repo_id="ibm/patchtsmixer-etth1-test-data", file="pretrain_batch.pt"): # TODO: Make repo public file = hf_hub_download(repo_id=repo_id, filename=file, repo_type="dataset") batch = torch.load(file, map_location=torch_device) return batch @require_torch @slow class PatchTSMixerModelIntegrationTests(unittest.TestCase): 
def test_pretrain_head(self): model = PatchTSMixerForPretraining.from_pretrained("ibm/patchtsmixer-etth1-pretrain").to(torch_device) batch = prepare_batch() torch.manual_seed(0) with torch.no_grad(): output = model(past_values=batch["past_values"].to(torch_device)).prediction_outputs num_patch = ( max(model.config.context_length, model.config.patch_length) - model.config.patch_length ) // model.config.patch_stride + 1 expected_shape = torch.Size( [ 64, model.config.num_input_channels, num_patch, model.config.patch_length, ] ) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor([[[[-0.9106]],[[1.5326]],[[-0.8245]],[[0.7439]],[[-0.7830]],[[2.6256]],[[-0.6485]],]],device=torch_device) # fmt: skip self.assertTrue(torch.allclose(output[0, :7, :1, :1], expected_slice, atol=TOLERANCE)) def test_forecasting_head(self): model = PatchTSMixerForPrediction.from_pretrained("ibm/patchtsmixer-etth1-forecasting").to(torch_device) batch = prepare_batch(file="forecast_batch.pt") model.eval() torch.manual_seed(0) with torch.no_grad(): output = model( past_values=batch["past_values"].to(torch_device), future_values=batch["future_values"].to(torch_device), ).prediction_outputs expected_shape = torch.Size([64, model.config.prediction_length, model.config.num_input_channels]) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[0.2471, 0.5036, 0.3596, 0.5401, -0.0985, 0.3423, -0.8439]], device=torch_device, ) self.assertTrue(torch.allclose(output[0, :1, :7], expected_slice, atol=TOLERANCE)) def test_prediction_generation(self): model = PatchTSMixerForPrediction.from_pretrained("ibm/patchtsmixer-etth1-generate").to(torch_device) batch = prepare_batch(file="forecast_batch.pt") print(batch["past_values"]) torch.manual_seed(0) model.eval() with torch.no_grad(): outputs = model.generate(past_values=batch["past_values"].to(torch_device)) expected_shape = torch.Size((64, 1, model.config.prediction_length, model.config.num_input_channels)) self.assertEqual(outputs.sequences.shape, expected_shape) expected_slice = torch.tensor( [[0.4308, -0.4731, 1.3512, -0.1038, -0.4655, 1.1279, -0.7179]], device=torch_device, ) mean_prediction = outputs.sequences.mean(dim=1) self.assertTrue(torch.allclose(mean_prediction[0, -1:], expected_slice, atol=TOLERANCE)) @require_torch class PatchTSMixerFunctionalTests(unittest.TestCase): @classmethod def setUpClass(cls): """Setup method: Called once before test-cases execution""" cls.params = {} cls.params.update( context_length=32, patch_length=8, num_input_channels=3, patch_stride=8, d_model=4, expansion_factor=2, num_layers=3, dropout=0.2, mode="common_channel", # common_channel, mix_channel gated_attn=True, norm_mlp="LayerNorm", mask_type="random", random_mask_ratio=0.5, mask_patches=[2, 3], forecast_mask_ratios=[1, 1], mask_value=0, masked_loss=True, channel_consistent_masking=True, head_dropout=0.2, prediction_length=64, out_channels=None, # num_labels=3, num_targets=3, output_range=None, head_aggregation=None, scaling="std", use_positional_encoding=False, positional_encoding="sincos", self_attn=False, self_attn_heads=1, num_parallel_samples=4, ) cls.num_patches = ( max(cls.params["context_length"], cls.params["patch_length"]) - cls.params["patch_length"] ) // cls.params["patch_stride"] + 1 # batch_size = 32 batch_size = 2 int(cls.params["prediction_length"] / cls.params["patch_length"]) cls.data = torch.rand( batch_size, cls.params["context_length"], cls.params["num_input_channels"], ) cls.enc_data = torch.rand( batch_size, 
cls.params["num_input_channels"], cls.num_patches, cls.params["patch_length"], ) cls.enc_output = torch.rand( batch_size, cls.params["num_input_channels"], cls.num_patches, cls.params["d_model"], ) cls.flat_enc_output = torch.rand( batch_size, cls.num_patches, cls.params["d_model"], ) cls.correct_pred_output = torch.rand( batch_size, cls.params["prediction_length"], cls.params["num_input_channels"], ) cls.correct_regression_output = torch.rand(batch_size, cls.params["num_targets"]) cls.correct_pretrain_output = torch.rand( batch_size, cls.params["num_input_channels"], cls.num_patches, cls.params["patch_length"], ) cls.correct_forecast_output = torch.rand( batch_size, cls.params["prediction_length"], cls.params["num_input_channels"], ) cls.correct_sel_forecast_output = torch.rand(batch_size, cls.params["prediction_length"], 2) cls.correct_classification_output = torch.rand( batch_size, cls.params["num_targets"], ) cls.correct_classification_classes = torch.randint(0, cls.params["num_targets"], (batch_size,)) def test_patchtsmixer_encoder(self): config = PatchTSMixerConfig(**self.__class__.params) enc = PatchTSMixerEncoder(config) output = enc(self.__class__.enc_data) self.assertEqual(output.last_hidden_state.shape, self.__class__.enc_output.shape) def test_patchmodel(self): config = PatchTSMixerConfig(**self.__class__.params) mdl = PatchTSMixerModel(config) output = mdl(self.__class__.data) self.assertEqual(output.last_hidden_state.shape, self.__class__.enc_output.shape) self.assertEqual(output.patch_input.shape, self.__class__.enc_data.shape) def test_pretrainhead(self): config = PatchTSMixerConfig(**self.__class__.params) head = PatchTSMixerPretrainHead( config=config, ) output = head(self.__class__.enc_output) self.assertEqual(output.shape, self.__class__.correct_pretrain_output.shape) def test_pretrain_full(self): config = PatchTSMixerConfig(**self.__class__.params) mdl = PatchTSMixerForPretraining(config) output = mdl(self.__class__.data) self.assertEqual( output.prediction_outputs.shape, self.__class__.correct_pretrain_output.shape, ) self.assertEqual(output.last_hidden_state.shape, self.__class__.enc_output.shape) self.assertEqual(output.loss.item() < np.inf, True) def test_pretrain_full_with_return_dict(self): config = PatchTSMixerConfig(**self.__class__.params) mdl = PatchTSMixerForPretraining(config) output = mdl(self.__class__.data, return_dict=False) self.assertEqual(output[1].shape, self.__class__.correct_pretrain_output.shape) self.assertEqual(output[2].shape, self.__class__.enc_output.shape) self.assertEqual(output[0].item() < np.inf, True) def test_forecast_head(self): config = PatchTSMixerConfig(**self.__class__.params) head = PatchTSMixerForPredictionHead( config=config, ) # output = head(self.__class__.enc_output, raw_data = self.__class__.correct_pretrain_output) output = head(self.__class__.enc_output) self.assertEqual(output.shape, self.__class__.correct_forecast_output.shape) def check_module( self, task, params=None, output_hidden_states=True, ): config = PatchTSMixerConfig(**params) if task == "forecast": mdl = PatchTSMixerForPrediction(config) target_input = self.__class__.correct_forecast_output if config.prediction_channel_indices is not None: target_output = self.__class__.correct_sel_forecast_output else: target_output = target_input ref_samples = target_output.unsqueeze(1).expand(-1, config.num_parallel_samples, -1, -1) ground_truth_arg = "future_values" output_predictions_arg = "prediction_outputs" elif task == "classification": mdl = 
PatchTSMixerForTimeSeriesClassification(config) target_input = self.__class__.correct_classification_classes target_output = self.__class__.correct_classification_output ground_truth_arg = "target_values" output_predictions_arg = "prediction_outputs" elif task == "regression": mdl = PatchTSMixerForRegression(config) target_input = self.__class__.correct_regression_output target_output = self.__class__.correct_regression_output ref_samples = target_output.unsqueeze(1).expand(-1, config.num_parallel_samples, -1) ground_truth_arg = "target_values" output_predictions_arg = "regression_outputs" elif task == "pretrain": mdl = PatchTSMixerForPretraining(config) target_input = None target_output = self.__class__.correct_pretrain_output ground_truth_arg = None output_predictions_arg = "prediction_outputs" else: print("invalid task") enc_output = self.__class__.enc_output if target_input is None: output = mdl(self.__class__.data, output_hidden_states=output_hidden_states) else: output = mdl( self.__class__.data, **{ ground_truth_arg: target_input, "output_hidden_states": output_hidden_states, }, ) prediction_outputs = getattr(output, output_predictions_arg) if isinstance(prediction_outputs, tuple): for t in prediction_outputs: self.assertEqual(t.shape, target_output.shape) else: self.assertEqual(prediction_outputs.shape, target_output.shape) self.assertEqual(output.last_hidden_state.shape, enc_output.shape) if output_hidden_states is True: self.assertEqual(len(output.hidden_states), params["num_layers"]) else: self.assertEqual(output.hidden_states, None) self.assertEqual(output.loss.item() < np.inf, True) if config.loss == "nll" and task in ["forecast", "regression"]: samples = mdl.generate(self.__class__.data) self.assertEqual(samples.sequences.shape, ref_samples.shape) @parameterized.expand( list( itertools.product( ["common_channel", "mix_channel"], [True, False], [True, False, "mean", "std"], [True, False], [None, [0, 2]], ["mse", "nll"], ) ) ) def test_forecast(self, mode, self_attn, scaling, gated_attn, prediction_channel_indices, loss): params = self.__class__.params.copy() params.update( mode=mode, self_attn=self_attn, scaling=scaling, prediction_channel_indices=prediction_channel_indices, gated_attn=gated_attn, loss=loss, ) self.check_module(task="forecast", params=params) @parameterized.expand( list( itertools.product( ["common_channel", "mix_channel"], [True, False], [True, False, "mean", "std"], [True, False], ["max_pool", "avg_pool"], ) ) ) def test_classification(self, mode, self_attn, scaling, gated_attn, head_aggregation): params = self.__class__.params.copy() params.update( mode=mode, self_attn=self_attn, scaling=scaling, head_aggregation=head_aggregation, gated_attn=gated_attn, ) self.check_module(task="classification", params=params) @parameterized.expand( list( itertools.product( ["common_channel", "mix_channel"], [True, False], [True, False, "mean", "std"], [True, False], ["max_pool", "avg_pool"], ["mse", "nll"], ) ) ) def test_regression(self, mode, self_attn, scaling, gated_attn, head_aggregation, loss): params = self.__class__.params.copy() params.update( mode=mode, self_attn=self_attn, scaling=scaling, head_aggregation=head_aggregation, gated_attn=gated_attn, loss=loss, ) self.check_module(task="regression", params=params) @parameterized.expand( list( itertools.product( ["common_channel", "mix_channel"], [True, False], [True, False, "mean", "std"], [True, False], ["random", "forecast"], [True, False], [True, False], ) ) ) def test_pretrain( self, mode, self_attn, scaling, 
gated_attn, mask_type, masked_loss, channel_consistent_masking, ): params = self.__class__.params.copy() params.update( mode=mode, self_attn=self_attn, scaling=scaling, gated_attn=gated_attn, mask_type=mask_type, masked_loss=masked_loss, channel_consistent_masking=channel_consistent_masking, ) self.check_module(task="pretrain", params=params) def forecast_full_module(self, params=None, output_hidden_states=False, return_dict=None): config = PatchTSMixerConfig(**params) mdl = PatchTSMixerForPrediction(config) target_val = self.__class__.correct_forecast_output if config.prediction_channel_indices is not None: target_val = self.__class__.correct_sel_forecast_output enc_output = self.__class__.enc_output output = mdl( self.__class__.data, future_values=self.__class__.correct_forecast_output, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if isinstance(output, tuple): output = PatchTSMixerForPredictionOutput(*output) if config.loss == "mse": self.assertEqual(output.prediction_outputs.shape, target_val.shape) self.assertEqual(output.last_hidden_state.shape, enc_output.shape) if output_hidden_states is True: self.assertEqual(len(output.hidden_states), params["num_layers"]) else: self.assertEqual(output.hidden_states, None) self.assertEqual(output.loss.item() < np.inf, True) if config.loss == "nll": samples = mdl.generate(self.__class__.data) ref_samples = target_val.unsqueeze(1).expand(-1, params["num_parallel_samples"], -1, -1) self.assertEqual(samples.sequences.shape, ref_samples.shape) def test_forecast_full(self): self.check_module(task="forecast", params=self.__class__.params, output_hidden_states=True) # self.forecast_full_module(self.__class__.params, output_hidden_states = True) def test_forecast_full_2(self): params = self.__class__.params.copy() params.update( mode="mix_channel", ) self.forecast_full_module(params, output_hidden_states=True) def test_forecast_full_2_with_return_dict(self): params = self.__class__.params.copy() params.update( mode="mix_channel", ) self.forecast_full_module(params, output_hidden_states=True, return_dict=False) def test_forecast_full_3(self): params = self.__class__.params.copy() params.update( mode="mix_channel", ) self.forecast_full_module(params, output_hidden_states=True) def test_forecast_full_5(self): params = self.__class__.params.copy() params.update( self_attn=True, use_positional_encoding=True, positional_encoding="sincos", ) self.forecast_full_module(params, output_hidden_states=True) def test_forecast_full_4(self): params = self.__class__.params.copy() params.update( mode="mix_channel", prediction_channel_indices=[0, 2], ) self.forecast_full_module(params) def test_forecast_full_distributional(self): params = self.__class__.params.copy() params.update( mode="mix_channel", prediction_channel_indices=[0, 2], loss="nll", distribution_output="normal", ) self.forecast_full_module(params) def test_forecast_full_distributional_2(self): params = self.__class__.params.copy() params.update( mode="mix_channel", prediction_channel_indices=[0, 2], loss="nll", # distribution_output = "normal", ) self.forecast_full_module(params) def test_forecast_full_distributional_3(self): params = self.__class__.params.copy() params.update( mode="mix_channel", # prediction_channel_indices=[0, 2], loss="nll", distribution_output="normal", ) self.forecast_full_module(params) def test_forecast_full_distributional_4(self): params = self.__class__.params.copy() params.update( mode="mix_channel", # prediction_channel_indices=[0, 2], loss="nll", 
distribution_output="normal", ) self.forecast_full_module(params) def test_classification_head(self): config = PatchTSMixerConfig(**self.__class__.params) head = PatchTSMixerLinearHead( config=config, ) # output = head(self.__class__.enc_output, raw_data = self.__class__.correct_pretrain_output) output = head(self.__class__.enc_output) self.assertEqual(output.shape, self.__class__.correct_classification_output.shape) def test_classification_full(self): config = PatchTSMixerConfig(**self.__class__.params) mdl = PatchTSMixerForTimeSeriesClassification(config) output = mdl( self.__class__.data, target_values=self.__class__.correct_classification_classes, ) self.assertEqual( output.prediction_outputs.shape, self.__class__.correct_classification_output.shape, ) self.assertEqual(output.last_hidden_state.shape, self.__class__.enc_output.shape) self.assertEqual(output.loss.item() < np.inf, True) def test_classification_full_with_return_dict(self): config = PatchTSMixerConfig(**self.__class__.params) mdl = PatchTSMixerForTimeSeriesClassification(config) output = mdl( self.__class__.data, target_values=self.__class__.correct_classification_classes, return_dict=False, ) if isinstance(output, tuple): output = PatchTSMixerForTimeSeriesClassificationOutput(*output) self.assertEqual( output.prediction_outputs.shape, self.__class__.correct_classification_output.shape, ) self.assertEqual(output.last_hidden_state.shape, self.__class__.enc_output.shape) self.assertEqual(output.loss.item() < np.inf, True) def test_regression_head(self): config = PatchTSMixerConfig(**self.__class__.params) head = PatchTSMixerLinearHead( config=config, ) output = head(self.__class__.enc_output) self.assertEqual(output.shape, self.__class__.correct_regression_output.shape) def test_regression_full(self): config = PatchTSMixerConfig(**self.__class__.params) mdl = PatchTSMixerForRegression(config) output = mdl(self.__class__.data, target_values=self.__class__.correct_regression_output) self.assertEqual( output.regression_outputs.shape, self.__class__.correct_regression_output.shape, ) self.assertEqual(output.last_hidden_state.shape, self.__class__.enc_output.shape) self.assertEqual(output.loss.item() < np.inf, True) def test_regression_full_with_return_dict(self): config = PatchTSMixerConfig(**self.__class__.params) mdl = PatchTSMixerForRegression(config) output = mdl( self.__class__.data, target_values=self.__class__.correct_regression_output, return_dict=False, ) if isinstance(output, tuple): output = PatchTSMixerForRegressionOutput(*output) self.assertEqual( output.regression_outputs.shape, self.__class__.correct_regression_output.shape, ) self.assertEqual(output.last_hidden_state.shape, self.__class__.enc_output.shape) self.assertEqual(output.loss.item() < np.inf, True) def test_regression_full_distribute(self): params = self.__class__.params.copy() params.update(loss="nll", distribution_output="normal") config = PatchTSMixerConfig(**params) mdl = PatchTSMixerForRegression(config) output = mdl(self.__class__.data, target_values=self.__class__.correct_regression_output) self.assertEqual( output.regression_outputs[0].shape, self.__class__.correct_regression_output.shape, ) self.assertEqual( output.regression_outputs[1].shape, self.__class__.correct_regression_output.shape, ) self.assertEqual(output.last_hidden_state.shape, self.__class__.enc_output.shape) self.assertEqual(output.loss.item() < np.inf, True) if config.loss == "nll": samples = mdl.generate(self.__class__.data) ref_samples = 
self.__class__.correct_regression_output.unsqueeze(1).expand( -1, params["num_parallel_samples"], -1 ) self.assertEqual(samples.sequences.shape, ref_samples.shape) def test_regression_full_distribute_2(self): params = self.__class__.params.copy() params.update(loss="nll", distribution_output="student_t") config = PatchTSMixerConfig(**params) mdl = PatchTSMixerForRegression(config) output = mdl(self.__class__.data, target_values=self.__class__.correct_regression_output) self.assertEqual( output.regression_outputs[0].shape, self.__class__.correct_regression_output.shape, ) self.assertEqual( output.regression_outputs[1].shape, self.__class__.correct_regression_output.shape, ) self.assertEqual(output.last_hidden_state.shape, self.__class__.enc_output.shape) self.assertEqual(output.loss.item() < np.inf, True) if config.loss == "nll": samples = mdl.generate(self.__class__.data) ref_samples = self.__class__.correct_regression_output.unsqueeze(1).expand( -1, params["num_parallel_samples"], -1 ) self.assertEqual(samples.sequences.shape, ref_samples.shape)
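# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original test suite: the functional
# tests above size their reference tensors with
# num_patches = (max(context_length, patch_length) - patch_length) // patch_stride + 1.
# The helper below restates that arithmetic with a plain tensor unfold so the
# expected (batch, channels, patches, patch_length) layout can be checked by
# hand; every default value is an assumption chosen to mirror
# PatchTSMixerFunctionalTests.setUpClass, not a library constant.
# ---------------------------------------------------------------------------
def _illustrate_patch_count(context_length=32, patch_length=8, patch_stride=8, num_input_channels=3):
    import torch

    data = torch.rand(2, context_length, num_input_channels)
    num_patches = (max(context_length, patch_length) - patch_length) // patch_stride + 1
    # Unfold the time axis into windows of `patch_length` taken every
    # `patch_stride` steps (arguments are dimension, size, step).
    patches = data.transpose(1, 2).unfold(-1, patch_length, patch_stride)
    assert patches.shape == (2, num_input_channels, num_patches, patch_length)  # (2, 3, 4, 8) for the defaults
    return num_patches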
transformers/tests/models/patchtsmixer/test_modeling_patchtsmixer.py/0
{ "file_path": "transformers/tests/models/patchtsmixer/test_modeling_patchtsmixer.py", "repo_id": "transformers", "token_count": 20492 }
446
# coding=utf-8 # Copyright 2023 Microsoft and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Phi model.""" import unittest import pytest from parameterized import parameterized from transformers import PhiConfig, is_torch_available, set_seed from transformers.testing_utils import ( require_bitsandbytes, require_flash_attn, require_torch, require_torch_gpu, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AutoTokenizer, PhiForCausalLM, PhiForSequenceClassification, PhiForTokenClassification, PhiModel, ) from transformers.models.phi.modeling_phi import ( PhiDynamicNTKScalingRotaryEmbedding, PhiLinearScalingRotaryEmbedding, PhiRotaryEmbedding, ) class PhiModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config 
= self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return PhiConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = PhiModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = PhiModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = PhiForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = PhiForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, 
)["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class PhiModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (PhiModel, PhiForCausalLM, PhiForSequenceClassification, PhiForTokenClassification) if is_torch_available() else () ) all_generative_model_classes = (PhiForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": PhiModel, "text-classification": PhiForSequenceClassification, "text-generation": PhiForCausalLM, "token-classification": PhiForTokenClassification, "zero-shot": PhiForSequenceClassification, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False # TODO (ydshieh): Check this. See https://app.circleci.com/pipelines/github/huggingface/transformers/79292/workflows/fa2ba644-8953-44a6-8f67-ccd69ca6a476/jobs/1012905 def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.setUp with Llama->Phi def setUp(self): self.model_tester = PhiModelTester(self) self.config_tester = ConfigTester(self, config_class=PhiConfig, hidden_size=37) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_config def test_config(self): self.config_tester.run_common_tests() # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_model def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model with Llama->Phi,llama->phi def test_phi_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = PhiForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model_for_single_label with Llama->Phi,llama->phi def test_phi_sequence_classification_model_for_single_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "single_label_classification" input_ids = input_dict["input_ids"] attention_mask = 
input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = PhiForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model_for_multi_label with Llama->Phi,llama->phi def test_phi_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = PhiForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) @parameterized.expand([("linear",), ("dynamic",)]) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_model_rope_scaling_from_config with Llama->Phi def test_model_rope_scaling_from_config(self, scaling_type): config, _ = self.model_tester.prepare_config_and_inputs_for_common() short_input = ids_tensor([1, 10], config.vocab_size) long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights original_model = PhiModel(config) original_model.to(torch_device) original_model.eval() original_short_output = original_model(short_input).last_hidden_state original_long_output = original_model(long_input).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights config.rope_scaling = {"type": scaling_type, "factor": 10.0} scaled_model = PhiModel(config) scaled_model.to(torch_device) scaled_model.eval() scaled_short_output = scaled_model(short_input).last_hidden_state scaled_long_output = scaled_model(long_input).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
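        # (Illustration, not from the original test: dynamic NTK implementations only
        # enlarge the rotary base once seq_len exceeds max_position_embeddings, e.g.
        # base * ((factor * seq_len / max_pos) - (factor - 1)) ** (dim / (dim - 2));
        # for shorter inputs the original inv_freq is reused, which is why the
        # "dynamic" branch below expects the short-input outputs to be unchanged.)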
if scaling_type == "dynamic": self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) else: self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5)) # Copied from tests.models.falcon.test_modeling_falcon.FalconModelTest.test_model_rope_scaling with Falcon->Phi def test_model_rope_scaling(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() hidden_size = config.hidden_size num_heads = config.num_attention_heads head_dim = hidden_size // num_heads scaling_factor = 10 short_input_length = 10 long_input_length = int(config.max_position_embeddings * 1.5) # Inputs x = torch.randn(1, dtype=torch.float32, device=torch_device) # used exlusively to get the dtype and the device # Sanity check original RoPE original_rope = PhiRotaryEmbedding( head_dim, max_position_embeddings=config.max_position_embeddings, base=config.rope_theta, ).to(torch_device) original_cos_short, original_sin_short = original_rope(x, short_input_length) original_cos_long, original_sin_long = original_rope(x, long_input_length) torch.testing.assert_close(original_cos_short, original_cos_long[:short_input_length, :]) torch.testing.assert_close(original_sin_short, original_sin_long[:short_input_length, :]) # Sanity check linear RoPE scaling # New position "x" should match original position with index "x/scaling_factor" linear_scaling_rope = PhiLinearScalingRotaryEmbedding( head_dim, max_position_embeddings=config.max_position_embeddings, base=config.rope_theta, scaling_factor=scaling_factor, ).to(torch_device) linear_cos_short, linear_sin_short = linear_scaling_rope(x, short_input_length) linear_cos_long, linear_sin_long = linear_scaling_rope(x, long_input_length) torch.testing.assert_close(linear_cos_short, linear_cos_long[:short_input_length, :]) torch.testing.assert_close(linear_sin_short, linear_sin_long[:short_input_length, :]) for new_position in range(0, long_input_length, scaling_factor): original_position = int(new_position // scaling_factor) torch.testing.assert_close(linear_cos_long[new_position, :], original_cos_long[original_position, :]) torch.testing.assert_close(linear_sin_long[new_position, :], original_sin_long[original_position, :]) # Sanity check Dynamic NTK RoPE scaling # Scaling should only be observed after a long input is fed. 
We can observe that the frequencies increase # with scaling_factor (or that `inv_freq` decreases) ntk_scaling_rope = PhiDynamicNTKScalingRotaryEmbedding( head_dim, max_position_embeddings=config.max_position_embeddings, base=config.rope_theta, scaling_factor=scaling_factor, ).to(torch_device) ntk_cos_short, ntk_sin_short = ntk_scaling_rope(x, short_input_length) ntk_cos_long, ntk_sin_long = ntk_scaling_rope(x, long_input_length) torch.testing.assert_close(ntk_cos_short, original_cos_short) torch.testing.assert_close(ntk_sin_short, original_sin_short) with self.assertRaises(AssertionError): torch.testing.assert_close(ntk_cos_long, original_cos_long) with self.assertRaises(AssertionError): torch.testing.assert_close(ntk_sin_long, original_sin_long) self.assertTrue((ntk_scaling_rope.inv_freq <= original_rope.inv_freq).all()) @require_flash_attn @require_torch_gpu @require_bitsandbytes @pytest.mark.flash_attn_test @slow # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_flash_attn_2_generate_padding_right with LlamaForCausalLM->PhiForCausalLM,LlamaTokenizer->AutoTokenizer,meta-llama/Llama-2-7b-hf->microsoft/phi-1 def test_flash_attn_2_generate_padding_right(self): """ Overwritting the common test as the test is flaky on tiny models """ model = PhiForCausalLM.from_pretrained( "microsoft/phi-1", load_in_4bit=True, device_map={"": 0}, ) tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-1") texts = ["hi", "Hello this is a very long sentence"] tokenizer.padding_side = "right" tokenizer.pad_token = tokenizer.eos_token inputs = tokenizer(texts, return_tensors="pt", padding=True).to(0) output_native = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_native = tokenizer.batch_decode(output_native) model = PhiForCausalLM.from_pretrained( "microsoft/phi-1", load_in_4bit=True, device_map={"": 0}, attn_implementation="flash_attention_2" ) output_fa_2 = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_fa_2 = tokenizer.batch_decode(output_fa_2) self.assertListEqual(output_native, output_fa_2) @slow @require_torch class PhiIntegrationTest(unittest.TestCase): def test_model_phi_1_logits(self): input_ids = { "input_ids": torch.tensor( [[1212, 318, 281, 1672, 2643, 290, 428, 318, 257, 1332]], dtype=torch.long, device=torch_device ) } model = PhiForCausalLM.from_pretrained("microsoft/phi-1").to(torch_device) model.eval() output = model(**input_ids).logits EXPECTED_OUTPUT = torch.tensor([[2.2671, 6.7684, -2.0107, -1.2440, -1.5335, -2.3828, 6.9186, 6.4245, 3.1548, 0.9998, 0.0760, 4.4653, 4.9857, 4.2956, 1.2308, -1.4178, 0.1361, 0.5191, -0.5699, -2.2201, -3.0750, -3.9600, -4.5936, -3.7394, -2.7777, 6.1874, -0.4148, -1.5684, -0.5967, 0.2395], [1.7004, 4.0383, 0.0546, 0.4530, -0.3619, -0.9021, 1.8355, 1.3587, 1.2406, 2.5775, -0.8834, 5.1910, 4.2565, 4.1406, 3.0752, -0.9099, 1.1595, 0.0264, 0.3243, -1.1803, -1.3945, -2.1406, -3.9939, -1.4438, -2.9546, 3.9204, 1.0851, -1.0598, -1.7819, -0.4827]]).to(torch_device) # fmt: skip self.assertTrue(torch.allclose(EXPECTED_OUTPUT, output[0, :2, :30], atol=1e-4, rtol=1e-4)) def test_model_phi_1_5_logits(self): input_ids = { "input_ids": torch.tensor( [[1212, 318, 281, 1672, 2643, 290, 428, 318, 257, 1332]], dtype=torch.long, device=torch_device ) } model = PhiForCausalLM.from_pretrained("microsoft/phi-1_5").to(torch_device) model.eval() output = model(**input_ids).logits EXPECTED_OUTPUT = torch.tensor([[12.2922, 13.3507, 8.6963, 9.1355, 9.3502, 9.2667, 14.2027, 13.1363, 13.5446, 11.1337, 9.9279, 16.7195, 13.0768, 
14.9141, 11.9965, 8.0233, 10.3129, 10.6118, 10.0204, 9.3827, 8.8344, 8.2806, 8.0153, 8.0540, 7.0964, 16.5743, 11.1256, 9.6987, 11.4770, 10.5440], [12.3323, 14.6050, 8.9986, 8.1580, 9.5654, 6.6728, 12.5966, 12.6662, 12.2784, 11.7522, 8.2039, 16.3102, 11.2203, 13.6088, 12.0125, 9.1021, 9.8216, 10.0987, 9.0926, 8.4260, 8.8009, 7.6547, 6.8075, 7.7881, 7.4501, 15.7451, 10.5053, 8.3129, 10.0027, 9.2612]]).to(torch_device) # fmt: skip self.assertTrue(torch.allclose(EXPECTED_OUTPUT, output[0, :2, :30], atol=1e-4, rtol=1e-4)) def test_model_phi_2_logits(self): input_ids = { "input_ids": torch.tensor( [[1212, 318, 281, 1672, 2643, 290, 428, 318, 257, 1332]], dtype=torch.long, device=torch_device ) } model = PhiForCausalLM.from_pretrained("microsoft/phi-2").to(torch_device) model.eval() output = model(**input_ids).logits EXPECTED_OUTPUT = torch.tensor([[6.4830, 6.1644, 3.4055, 2.2848, 5.4654, 2.8360, 5.5975, 5.5391, 7.3101, 4.2498, 2.5913, 10.3885, 6.4359, 8.7982, 5.6534, 0.5150, 2.7498, 3.1930, 2.4334, 1.7781, 1.5613, 1.3067, 0.8291, 0.5633, 0.6522, 9.8191, 5.5771, 2.7987, 4.2845, 3.7030], [6.0642, 7.8242, 3.4634, 1.9259, 4.3169, 2.0913, 6.0446, 3.6804, 6.6736, 4.0727, 2.1791, 11.4139, 5.6795, 7.5652, 6.2039, 2.7174, 4.3266, 3.6930, 2.8058, 2.6721, 2.3047, 2.0848, 2.0972, 2.0441, 1.3160, 9.2085, 4.5557, 3.0296, 2.6045, 2.4059]]).to(torch_device) # fmt: skip self.assertTrue(torch.allclose(EXPECTED_OUTPUT, output[0, :2, :30], atol=1e-3, rtol=1e-3)) def test_phi_2_generation(self): model = PhiForCausalLM.from_pretrained("microsoft/phi-2") tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2") inputs = tokenizer( "Can you help me write a formal email to a potential business partner proposing a joint venture?", return_tensors="pt", return_attention_mask=False, ) outputs = model.generate(**inputs, max_new_tokens=30) output_text = tokenizer.batch_decode(outputs) EXPECTED_OUTPUT = [ "Can you help me write a formal email to a potential business partner proposing a joint venture?\nInput: Company A: ABC Inc.\nCompany B: XYZ Ltd.\nJoint Venture: A new online platform for e-commerce" ] self.assertListEqual(output_text, EXPECTED_OUTPUT)
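# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original test suite: the RoPE scaling
# test above relies on two properties. Linear scaling makes position p reuse
# the angles of position p / factor, and dynamic NTK scaling enlarges the
# rotary base for long inputs, which can only lower the inverse frequencies.
# The helpers below restate that arithmetic in isolation; dim, base, factor,
# seq_len and max_pos are arbitrary assumed values, not Phi checkpoint values.
# ---------------------------------------------------------------------------
def _rope_inv_freq(dim=8, base=10000.0):
    import torch

    return 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))


def _illustrate_ntk_inv_freq_shrinks(dim=8, base=10000.0, factor=10, seq_len=100, max_pos=64):
    # Base enlargement rule used by dynamic NTK rotary embeddings once the
    # input grows past the original maximum position (here 100 > 64).
    scaled_base = base * ((factor * seq_len / max_pos) - (factor - 1)) ** (dim / (dim - 2))
    # A larger base yields elementwise smaller-or-equal inverse frequencies,
    # mirroring the final `inv_freq` assertion in `test_model_rope_scaling`.
    return bool((_rope_inv_freq(dim, scaled_base) <= _rope_inv_freq(dim, base)).all())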
transformers/tests/models/phi/test_modeling_phi.py/0
{ "file_path": "transformers/tests/models/phi/test_modeling_phi.py", "repo_id": "transformers", "token_count": 11449 }
447
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import ( check_json_file_has_correct_format, require_essentia, require_librosa, require_scipy, require_tf, require_torch, ) from transformers.utils.import_utils import ( is_essentia_available, is_librosa_available, is_scipy_available, is_torch_available, ) from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin requirements_available = ( is_torch_available() and is_essentia_available() and is_scipy_available() and is_librosa_available() ) if requirements_available: import torch from transformers import Pop2PianoFeatureExtractor class Pop2PianoFeatureExtractionTester(unittest.TestCase): def __init__( self, parent, n_bars=2, sample_rate=22050, use_mel=True, padding_value=0, vocab_size_special=4, vocab_size_note=128, vocab_size_velocity=2, vocab_size_time=100, ): self.parent = parent self.n_bars = n_bars self.sample_rate = sample_rate self.use_mel = use_mel self.padding_value = padding_value self.vocab_size_special = vocab_size_special self.vocab_size_note = vocab_size_note self.vocab_size_velocity = vocab_size_velocity self.vocab_size_time = vocab_size_time def prepare_feat_extract_dict(self): return { "n_bars": self.n_bars, "sample_rate": self.sample_rate, "use_mel": self.use_mel, "padding_value": self.padding_value, "vocab_size_special": self.vocab_size_special, "vocab_size_note": self.vocab_size_note, "vocab_size_velocity": self.vocab_size_velocity, "vocab_size_time": self.vocab_size_time, } @require_torch @require_essentia @require_librosa @require_scipy class Pop2PianoFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = Pop2PianoFeatureExtractor if requirements_available else None def setUp(self): self.feat_extract_tester = Pop2PianoFeatureExtractionTester(self) def test_feat_extract_from_and_save_pretrained(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() mel_1 = feat_extract_first.use_mel mel_2 = feat_extract_second.use_mel self.assertTrue(np.allclose(mel_1, mel_2)) self.assertEqual(dict_first, dict_second) def test_feat_extract_to_json_file(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "feat_extract.json") feat_extract_first.to_json_file(json_file_path) feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) dict_first = feat_extract_first.to_dict() dict_second = 
feat_extract_second.to_dict() mel_1 = feat_extract_first.use_mel mel_2 = feat_extract_second.use_mel self.assertTrue(np.allclose(mel_1, mel_2)) self.assertEqual(dict_first, dict_second) def test_call(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_input = np.zeros([1000000], dtype=np.float32) input_features = feature_extractor(speech_input, sampling_rate=16_000, return_tensors="np") self.assertTrue(input_features.input_features.ndim == 3) self.assertEqual(input_features.input_features.shape[-1], 512) self.assertTrue(input_features.beatsteps.ndim == 2) self.assertTrue(input_features.extrapolated_beatstep.ndim == 2) def test_integration(self): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").select([0])["audio"] input_speech = [x["array"] for x in speech_samples][0] sampling_rate = [x["sampling_rate"] for x in speech_samples][0] feaure_extractor = Pop2PianoFeatureExtractor.from_pretrained("sweetcocoa/pop2piano") input_features = feaure_extractor( input_speech, sampling_rate=sampling_rate, return_tensors="pt" ).input_features EXPECTED_INPUT_FEATURES = torch.tensor( [[-7.1493, -6.8701, -4.3214], [-5.9473, -5.7548, -3.8438], [-6.1324, -5.9018, -4.3778]] ) self.assertTrue(torch.allclose(input_features[0, :3, :3], EXPECTED_INPUT_FEATURES, atol=1e-4)) def test_attention_mask(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_input1 = np.zeros([1_000_000], dtype=np.float32) speech_input2 = np.random.randint(low=0, high=10, size=500_000).astype(np.float32) input_features = feature_extractor( [speech_input1, speech_input2], sampling_rate=[44_100, 16_000], return_tensors="np", return_attention_mask=True, ) self.assertTrue(hasattr(input_features, "attention_mask")) # check shapes self.assertTrue(input_features["attention_mask"].ndim == 2) self.assertEqual(input_features["attention_mask_beatsteps"].shape[0], 2) self.assertEqual(input_features["attention_mask_extrapolated_beatstep"].shape[0], 2) # check if they are any values except 0 and 1 self.assertTrue(np.max(input_features["attention_mask"]) == 1) self.assertTrue(np.max(input_features["attention_mask_beatsteps"]) == 1) self.assertTrue(np.max(input_features["attention_mask_extrapolated_beatstep"]) == 1) self.assertTrue(np.min(input_features["attention_mask"]) == 0) self.assertTrue(np.min(input_features["attention_mask_beatsteps"]) == 0) self.assertTrue(np.min(input_features["attention_mask_extrapolated_beatstep"]) == 0) def test_batch_feature(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_input1 = np.zeros([1_000_000], dtype=np.float32) speech_input2 = np.ones([2_000_000], dtype=np.float32) speech_input3 = np.random.randint(low=0, high=10, size=500_000).astype(np.float32) input_features = feature_extractor( [speech_input1, speech_input2, speech_input3], sampling_rate=[44_100, 16_000, 48_000], return_attention_mask=True, ) self.assertEqual(len(input_features["input_features"].shape), 3) # check shape self.assertEqual(input_features["beatsteps"].shape[0], 3) self.assertEqual(input_features["extrapolated_beatstep"].shape[0], 3) def test_batch_feature_np(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_input1 = np.zeros([1_000_000], dtype=np.float32) speech_input2 = np.ones([2_000_000], 
dtype=np.float32) speech_input3 = np.random.randint(low=0, high=10, size=500_000).astype(np.float32) input_features = feature_extractor( [speech_input1, speech_input2, speech_input3], sampling_rate=[44_100, 16_000, 48_000], return_tensors="np", return_attention_mask=True, ) # check np array or not self.assertEqual(type(input_features["input_features"]), np.ndarray) # check shape self.assertEqual(len(input_features["input_features"].shape), 3) def test_batch_feature_pt(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_input1 = np.zeros([1_000_000], dtype=np.float32) speech_input2 = np.ones([2_000_000], dtype=np.float32) speech_input3 = np.random.randint(low=0, high=10, size=500_000).astype(np.float32) input_features = feature_extractor( [speech_input1, speech_input2, speech_input3], sampling_rate=[44_100, 16_000, 48_000], return_tensors="pt", return_attention_mask=True, ) # check pt tensor or not self.assertEqual(type(input_features["input_features"]), torch.Tensor) # check shape self.assertEqual(len(input_features["input_features"].shape), 3) @require_tf def test_batch_feature_tf(self): import tensorflow as tf feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_input1 = np.zeros([1_000_000], dtype=np.float32) speech_input2 = np.ones([2_000_000], dtype=np.float32) speech_input3 = np.random.randint(low=0, high=10, size=500_000).astype(np.float32) input_features = feature_extractor( [speech_input1, speech_input2, speech_input3], sampling_rate=[44_100, 16_000, 48_000], return_tensors="tf", return_attention_mask=True, ) # check tf tensor or not self.assertTrue(tf.is_tensor(input_features["input_features"])) # check shape self.assertEqual(len(input_features["input_features"].shape), 3) @unittest.skip( "Pop2PianoFeatureExtractor does not supports padding externally (while processing audios in batches padding is automatically applied to max_length)" ) def test_padding_accepts_tensors_pt(self): pass @unittest.skip( "Pop2PianoFeatureExtractor does not supports padding externally (while processing audios in batches padding is automatically applied to max_length)" ) def test_padding_accepts_tensors_tf(self): pass @unittest.skip( "Pop2PianoFeatureExtractor does not supports padding externally (while processing audios in batches padding is automatically applied to max_length)" ) def test_padding_from_list(self): pass @unittest.skip( "Pop2PianoFeatureExtractor does not supports padding externally (while processing audios in batches padding is automatically applied to max_length)" ) def test_padding_from_array(self): pass @unittest.skip(reason="Pop2PianoFeatureExtractor does not support truncation") def test_attention_mask_with_truncation(self): pass @unittest.skip(reason="Pop2PianoFeatureExtractor does not supports truncation") def test_truncation_from_array(self): pass @unittest.skip(reason="Pop2PianoFeatureExtractor does not supports truncation") def test_truncation_from_list(self): pass
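# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original test suite: the attention-mask
# tests above only assert that padded positions are 0 and real positions are 1.
# The helper below shows that convention on toy lengths with plain numpy and
# never calls the real feature extractor; the lengths are made-up values.
# ---------------------------------------------------------------------------
def _toy_attention_mask(lengths, max_len=None):
    import numpy as np

    max_len = max_len if max_len is not None else max(lengths)
    mask = np.zeros((len(lengths), max_len), dtype=np.int64)
    for row, length in enumerate(lengths):
        mask[row, :length] = 1  # 1 marks real frames, 0 marks padding
    return mask


# e.g. _toy_attention_mask([3, 5]) -> [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]], so the
# mask's max is 1 and its min is 0, the property checked in test_attention_mask.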
transformers/tests/models/pop2piano/test_feature_extraction_pop2piano.py/0
{ "file_path": "transformers/tests/models/pop2piano/test_feature_extraction_pop2piano.py", "repo_id": "transformers", "token_count": 4968 }
448
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Qwen2Audio model.""" import gc import unittest from io import BytesIO from urllib.request import urlopen import librosa from transformers import ( AutoProcessor, Qwen2AudioConfig, Qwen2AudioForConditionalGeneration, is_torch_available, ) from transformers.testing_utils import ( require_torch, slow, torch_device, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): import torch else: is_torch_greater_or_equal_than_2_0 = False class Qwen2AudioModelTester: def __init__( self, parent, ignore_index=-100, audio_token_index=0, seq_length=7, feat_seq_length=60, text_config={ "model_type": "qwen2", "intermediate_size": 36, "initializer_range": 0.02, "hidden_size": 32, "max_position_embeddings": 52, "num_hidden_layers": 2, "num_attention_heads": 4, "num_key_value_heads": 2, "use_labels": True, "use_mrope": False, "vocab_size": 99, }, is_training=True, audio_config={ "model_type": "qwen2_audio_encoder", "d_model": 16, "encoder_attention_heads": 4, "encoder_ffn_dim": 16, "encoder_layers": 2, "num_mel_bins": 80, "max_source_positions": 30, "initializer_range": 0.02, }, ): self.parent = parent self.ignore_index = ignore_index self.audio_token_index = audio_token_index self.text_config = text_config self.audio_config = audio_config self.seq_length = seq_length self.feat_seq_length = feat_seq_length self.num_hidden_layers = text_config["num_hidden_layers"] self.vocab_size = text_config["vocab_size"] self.hidden_size = text_config["hidden_size"] self.num_attention_heads = text_config["num_attention_heads"] self.is_training = is_training self.batch_size = 3 self.encoder_seq_length = audio_config["max_source_positions"] // 2 + seq_length - 1 def get_config(self): return Qwen2AudioConfig( text_config=self.text_config, audio_config=self.audio_config, ignore_index=self.ignore_index, audio_token_index=self.audio_token_index, ) def prepare_config_and_inputs(self): input_features_values = floats_tensor( [ self.batch_size, self.audio_config["num_mel_bins"], self.feat_seq_length, ] ) config = self.get_config() feature_attention_mask = torch.ones([self.batch_size, self.feat_seq_length], dtype=torch.long).to(torch_device) return config, input_features_values, feature_attention_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_features_values, feature_attention_mask = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1 attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device) attention_mask[:, :1] = 0 # we are giving 3 audios let's make sure we pass in 3 audios tokens input_ids[:, 1] = config.audio_token_index inputs_dict = { "input_features": input_features_values, "feature_attention_mask": feature_attention_mask, 
"input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict def create_and_check_qwen2audio_model_fp16_forward(self, config, input_ids, pixel_values, attention_mask): model = Qwen2AudioForConditionalGeneration(config=config) model.to(torch_device) model.eval() with torch.autocast(device_type="cuda", dtype=torch.float16): logits = model( input_ids=input_ids, attention_mask=attention_mask, pixel_values=pixel_values.to(torch.bfloat16), return_dict=True, )["logits"] self.parent.assertFalse(torch.isnan(logits).any().item()) @require_torch class Qwen2AudioForConditionalGenerationModelTest(ModelTesterMixin, unittest.TestCase): """ Model tester for `Qwen2AudioForConditionalGeneration`. """ all_model_classes = (Qwen2AudioForConditionalGeneration,) if is_torch_available() else () test_pruning = False test_head_masking = False def setUp(self): self.model_tester = Qwen2AudioModelTester(self) self.config_tester = ConfigTester(self, config_class=Qwen2AudioConfig, has_text_modality=False) @unittest.skip(reason="Compile not yet supported because in Qwen2Audio models") def test_sdpa_can_compile_dynamic(self): pass @unittest.skip(reason="Compile not yet supported because in Qwen2Audio models") def test_sdpa_can_dispatch_on_flash(self): pass @require_torch class Qwen2AudioForConditionalGenerationIntegrationTest(unittest.TestCase): def setUp(self): self.processor = AutoProcessor.from_pretrained("Qwen/Qwen2-Audio-7B-Instruct") def tearDown(self): gc.collect() torch.cuda.empty_cache() @slow def test_small_model_integration_test_single(self): # Let' s make sure we test the preprocessing to replace what is used model = Qwen2AudioForConditionalGeneration.from_pretrained("Qwen/Qwen2-Audio-7B-Instruct") url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3" messages = [ { "role": "user", "content": [ {"type": "audio", "audio_url": url}, {"type": "text", "text": "What's that sound?"}, ], } ] raw_audio, _ = librosa.load(BytesIO(urlopen(url).read()), sr=self.processor.feature_extractor.sampling_rate) formatted_prompt = self.processor.apply_chat_template(messages, add_generation_prompt=True) inputs = self.processor(text=formatted_prompt, audios=[raw_audio], return_tensors="pt", padding=True) output = model.generate(**inputs, max_new_tokens=32) EXPECTED_INPUT_IDS = torch.tensor( [ [ 151644, 8948, 198, 2610, 525, 264, 10950, 17847, 13, 151645, 198, 151644, 872, 198, 14755, 220, 16, 25, 220, 151647, 151646, 151648, 198, 3838, 594, 429, 5112, 30, 151645, 198, 151644, 77091, 198, ] ] ) self.assertTrue(torch.equal(inputs["input_ids"], EXPECTED_INPUT_IDS)) EXPECTED_DECODED_TEXT = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nAudio 1: <|audio_bos|><|AUDIO|><|audio_eos|>\nWhat's that sound?<|im_end|>\n<|im_start|>assistant\nIt is the sound of glass breaking.<|im_end|>" self.assertEqual( self.processor.decode(output[0], skip_special_tokens=False), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_batch(self): # Let' s make sure we test the preprocessing to replace what is used model = Qwen2AudioForConditionalGeneration.from_pretrained("Qwen/Qwen2-Audio-7B-Instruct") conversation1 = [ { "role": "user", "content": [ { "type": "audio", "audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3", }, {"type": "text", "text": "What's that sound?"}, ], }, {"role": "assistant", "content": "It is the sound of glass shattering."}, { "role": "user", "content": [ { "type": 
"audio", "audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/f2641_0_throatclearing.wav", }, {"type": "text", "text": "What can you hear?"}, ], }, ] conversation2 = [ { "role": "user", "content": [ { "type": "audio", "audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/1272-128104-0000.flac", }, {"type": "text", "text": "What does the person say?"}, ], }, ] conversations = [conversation1, conversation2] text = [ self.processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False) for conversation in conversations ] audios = [] for conversation in conversations: for message in conversation: if isinstance(message["content"], list): for ele in message["content"]: if ele["type"] == "audio": audios.append( librosa.load( BytesIO(urlopen(ele["audio_url"]).read()), sr=self.processor.feature_extractor.sampling_rate, )[0] ) inputs = self.processor(text=text, audios=audios, return_tensors="pt", padding=True) output = model.generate(**inputs, max_new_tokens=32) EXPECTED_DECODED_TEXT = [ "system\nYou are a helpful assistant.\nuser\nAudio 1: \nWhat's that sound?\nassistant\nIt is the sound of glass shattering.\nuser\nAudio 2: \nWhat can you hear?\nassistant\ncough and throat clearing.", "system\nYou are a helpful assistant.\nuser\nAudio 1: \nWhat does the person say?\nassistant\nThe original content of this audio is: 'Mister Quiller is the apostle of the middle classes and we are glad to welcome his gospel.'", ] self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_multiturn(self): # Let' s make sure we test the preprocessing to replace what is used model = Qwen2AudioForConditionalGeneration.from_pretrained("Qwen/Qwen2-Audio-7B-Instruct") messages = [ {"role": "system", "content": "You are a helpful assistant."}, { "role": "user", "content": [ { "type": "audio", "audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3", }, {"type": "text", "text": "What's that sound?"}, ], }, {"role": "assistant", "content": "It is the sound of glass shattering."}, { "role": "user", "content": [ { "type": "audio", "audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/f2641_0_throatclearing.wav", }, {"type": "text", "text": "How about this one?"}, ], }, ] formatted_prompt = self.processor.apply_chat_template(messages, add_generation_prompt=True) audios = [] for message in messages: if isinstance(message["content"], list): for ele in message["content"]: if ele["type"] == "audio": audios.append( librosa.load( BytesIO(urlopen(ele["audio_url"]).read()), sr=self.processor.feature_extractor.sampling_rate, )[0] ) inputs = self.processor(text=formatted_prompt, audios=audios, return_tensors="pt", padding=True) output = model.generate(**inputs, max_new_tokens=32, top_k=1) EXPECTED_DECODED_TEXT = [ "system\nYou are a helpful assistant.\nuser\nAudio 1: \nWhat's that sound?\nassistant\nIt is the sound of glass shattering.\nuser\nAudio 2: \nHow about this one?\nassistant\nThroat clearing.", ] self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, )
transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py/0
{ "file_path": "transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py", "repo_id": "transformers", "token_count": 7262 }
449
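For reference, the multimodal preprocessing pattern exercised by the Qwen2Audio integration tests above condenses into a short standalone snippet. This is a minimal sketch rather than part of the test file: it assumes network access, the librosa dependency, and the same public checkpoint and audio URL the tests use.

from io import BytesIO
from urllib.request import urlopen

import librosa
from transformers import AutoProcessor, Qwen2AudioForConditionalGeneration

processor = AutoProcessor.from_pretrained("Qwen/Qwen2-Audio-7B-Instruct")
model = Qwen2AudioForConditionalGeneration.from_pretrained("Qwen/Qwen2-Audio-7B-Instruct")

url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3"
messages = [
    {
        "role": "user",
        "content": [
            {"type": "audio", "audio_url": url},
            {"type": "text", "text": "What's that sound?"},
        ],
    }
]

# Render the chat template, then load the audio at the feature extractor's sampling rate,
# mirroring test_small_model_integration_test_single above.
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
audio, _ = librosa.load(BytesIO(urlopen(url).read()), sr=processor.feature_extractor.sampling_rate)

inputs = processor(text=prompt, audios=[audio], return_tensors="pt", padding=True)
output_ids = model.generate(**inputs, max_new_tokens=32)
print(processor.decode(output_ids[0], skip_special_tokens=True))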
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "google/reformer-crime-and-punishment" tokenizer_class = ReformerTokenizer rust_tokenizer_class = ReformerTokenizerFast test_rust_tokenizer = True test_seq2seq = False test_sentencepiece = True def setUp(self): super().setUp() tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True) tokenizer.save_pretrained(self.tmpdirname) def test_convert_token_and_id(self): """Test ``_convert_token_to_id`` and ``_convert_id_to_token``.""" token = "<s>" token_id = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<unk>") self.assertEqual(vocab_keys[1], "<s>") self.assertEqual(vocab_keys[-1], "j") self.assertEqual(len(vocab_keys), 1_000) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 1_000) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: self.skipTest(reason="test_rust_tokenizer is set to False") tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "I was born in 92000, and this is falsé." 
tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) def test_padding(self, max_length=15): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) # Simple input s = "This is a simple input" s2 = ["This is a simple input 1", "This is a simple input 2"] p = ("This is a simple input", "This is a pair") p2 = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length") # Simple input self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length") # Simple input self.assertRaises( ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", ) # Pair input self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length") # Pair input self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length") # Pair input self.assertRaises( ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", ) @unittest.skip(reason="Tokenizer has no padding token") def test_padding_different_model_input_name(self): pass def test_full_tokenizer(self): tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) @cached_property def big_tokenizer(self): return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment") @slow def test_tokenization_base_easy_symbols(self): symbols = "Hello World!" original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287] self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols)) @slow def test_tokenization_base_hard_symbols(self): symbols = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! 
: - . Also we will' " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) original_tokenizer_encodings = [ 108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265, ] self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols)) @require_torch @slow def test_torch_encode_plus_sent_to_model(self): import torch from transformers import ReformerConfig, ReformerModel # Build sequence first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10] sequence = " ".join(first_ten_tokens) encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt") batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt") config = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) config.axial_pos_shape = encoded_sequence["input_ids"].shape model = ReformerModel(config) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**encoded_sequence) model(**batch_encoded_sequence) @slow def test_tokenizer_integration(self): expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # fmt: skip # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 sequences = [ "This is a very simple sentence.", "The quick brown fox jumps over the lazy dog.", ] self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="google/reformer-crime-and-punishment", revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a", padding=False, sequences=sequences, )
transformers/tests/models/reformer/test_tokenization_reformer.py/0
{ "file_path": "transformers/tests/models/reformer/test_tokenization_reformer.py", "repo_id": "transformers", "token_count": 6425 }
450
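The slow/fast tokenizer parity assertions in the Reformer tests above reduce to the following pattern. This is a hedged sketch, assuming sentencepiece and tokenizers are installed and the public checkpoint named in the test class is reachable.

from transformers import ReformerTokenizer, ReformerTokenizerFast

# Slow (sentencepiece-based) and fast tokenizers for the same checkpoint; the
# parity test above asserts they produce identical tokens and ids.
slow_tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
fast_tokenizer = ReformerTokenizerFast.from_pretrained("google/reformer-crime-and-punishment")

sequence = "I was born in 92000, and this is falsé."
assert slow_tokenizer.tokenize(sequence) == fast_tokenizer.tokenize(sequence)
assert slow_tokenizer.encode(sequence, add_special_tokens=False) == fast_tokenizer.encode(
    sequence, add_special_tokens=False
)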
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import RobertaConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.roberta.modeling_tf_roberta import ( TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaModel, ) class TFRobertaModelTester: def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_token_type_ids = True self.use_labels = True self.vocab_size = 99 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = RobertaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) 
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFRobertaModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_causal_lm_base_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.is_decoder = True model = TFRobertaModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = TFRobertaModel(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states) # Also check the case where encoder outputs are not passed result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_causal_lm_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.is_decoder = True model = TFRobertaForCausalLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } prediction_scores = model(inputs)["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size] ) def create_and_check_causal_lm_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = TFRobertaForCausalLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states) prediction_scores = result["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size] ) def create_and_check_causal_lm_model_past( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.is_decoder = True model = 
TFRobertaForCausalLM(config=config) # special to `RobertaEmbeddings` in `Roberta`: # - its `padding_idx` and its effect on `position_ids` # (TFRobertaEmbeddings.create_position_ids_from_input_ids) # - `1` here is `TFRobertaEmbeddings.padding_idx` input_ids = tf.where(input_ids == 1, 2, input_ids) # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs.past_key_values # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and attn_mask next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) output_from_no_past = model(next_input_ids, output_hidden_states=True).hidden_states[0] output_from_past = model( next_tokens, past_key_values=past_key_values, output_hidden_states=True ).hidden_states[0] # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6) def create_and_check_causal_lm_model_past_with_attn_mask( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.is_decoder = True model = TFRobertaForCausalLM(config=config) # special to `RobertaEmbeddings` in `Roberta`: # - its `padding_idx` and its effect on `position_ids` # (TFRobertaEmbeddings.create_position_ids_from_input_ids) # - `1` here is `TFRobertaEmbeddings.padding_idx` # avoid `padding_idx` in the past input_ids = tf.where(input_ids == 1, 2, input_ids) # create attention mask half_seq_length = self.seq_length // 2 attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32) attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32) attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1) # first forward pass outputs = model(input_ids, attention_mask=attn_mask, use_cache=True) # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) past_key_values = outputs.past_key_values # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1 random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size) vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change) condition = tf.transpose( tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size)) ) input_ids = tf.where(condition, random_other_next_tokens, input_ids) # avoid `padding_idx` in the past input_ids = tf.where(input_ids == 1, 2, input_ids) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) attn_mask = tf.concat( [attn_mask, tf.ones((attn_mask.shape[0], 1), dtype=tf.int32)], axis=1, ) output_from_no_past = model( next_input_ids, attention_mask=attn_mask, output_hidden_states=True, ).hidden_states[0] output_from_past = model( next_tokens, past_key_values=past_key_values, attention_mask=attn_mask, output_hidden_states=True ).hidden_states[0] # select random 
slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6) def create_and_check_causal_lm_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.is_decoder = True model = TFRobertaForCausalLM(config=config) # special to `RobertaEmbeddings` in `Roberta`: # - its `padding_idx` and its effect on `position_ids` # (TFRobertaEmbeddings.create_position_ids_from_input_ids) # - `1` here is `TFRobertaEmbeddings.padding_idx` # avoid `padding_idx` in the past input_ids = tf.where(input_ids == 1, 2, input_ids) input_ids = input_ids[:1, :] input_mask = input_mask[:1, :] self.batch_size = 1 # first forward pass outputs = model(input_ids, attention_mask=input_mask, use_cache=True) past_key_values = outputs.past_key_values # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True, ).hidden_states[0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, ).hidden_states[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = TFRobertaForCausalLM(config=config) # special to `RobertaEmbeddings` in `Roberta`: # - its `padding_idx` and its effect on `position_ids` # (TFRobertaEmbeddings.create_position_ids_from_input_ids) # - `1` here is `TFRobertaEmbeddings.padding_idx` # avoid `padding_idx` in the past input_ids = tf.where(input_ids == 1, 2, input_ids) input_ids = input_ids[:1, :] input_mask = input_mask[:1, :] encoder_hidden_states = encoder_hidden_states[:1, :, :] encoder_attention_mask = encoder_attention_mask[:1, :] self.batch_size = 1 # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1) output_from_no_past = model( next_input_ids, 
attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, ).hidden_states[0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, ).hidden_states[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFRobertaForMaskedLM(config=config) result = model([input_ids, input_mask, token_type_ids]) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFRobertaForTokenClassification(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFRobertaForQuestionAnswering(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFRobertaForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFRobertaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFRobertaModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaForQuestionAnswering, ) if 
is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFRobertaModel, "fill-mask": TFRobertaForMaskedLM, "question-answering": TFRobertaForQuestionAnswering, "text-classification": TFRobertaForSequenceClassification, "text-generation": TFRobertaForCausalLM, "token-classification": TFRobertaForTokenClassification, "zero-shot": TFRobertaForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFRobertaModelTester(self) self.config_tester = ConfigTester(self, config_class=RobertaConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): """Test the base model""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_causal_lm_base_model(self): """Test the base model of the causal LM model is_deocder=True, no cross_attention, no encoder outputs """ config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_base_model(*config_and_inputs) def test_model_as_decoder(self): """Test the base model as a decoder (of an encoder-decoder architecture) is_deocder=True + cross_attention + pass encoder outputs """ config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_causal_lm(self): """Test the causal LM model""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model(*config_and_inputs) def test_causal_lm_model_as_decoder(self): """Test the causal LM model as a decoder""" config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_causal_lm_model_as_decoder(*config_and_inputs) def test_causal_lm_model_past(self): """Test causal LM model with `past_key_values`""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past(*config_and_inputs) def test_causal_lm_model_past_with_attn_mask(self): """Test the causal LM model with `past_key_values` and `attention_mask`""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past_with_attn_mask(*config_and_inputs) def test_causal_lm_model_past_with_large_inputs(self): """Test the causal LM model with `past_key_values` and a longer decoder sequence length""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past_large_inputs(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): """Similar to `test_causal_lm_model_past_with_large_inputs` but with cross-attention""" config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) 
def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "FacebookAI/roberta-base" model = TFRobertaModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_tf @require_sentencepiece @require_tokenizers class TFRobertaModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = TFRobertaForMaskedLM.from_pretrained("FacebookAI/roberta-base") input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) output = model(input_ids)[0] expected_shape = [1, 11, 50265] self.assertEqual(list(output.numpy().shape), expected_shape) # compare the actual values for a slice. expected_slice = tf.constant( [[[33.8802, -4.3103, 22.7761], [4.6539, -2.8098, 13.6253], [1.8228, -3.6898, 8.8600]]] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4)) @slow def test_inference_no_head(self): model = TFRobertaModel.from_pretrained("FacebookAI/roberta-base") input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) output = model(input_ids)[0] # compare the actual values for a slice. expected_slice = tf.constant( [[[-0.0231, 0.0782, 0.0074], [-0.1854, 0.0540, -0.0175], [0.0548, 0.0799, 0.1687]]] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4)) @slow def test_inference_classification_head(self): model = TFRobertaForSequenceClassification.from_pretrained("FacebookAI/roberta-large-mnli") input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) output = model(input_ids)[0] expected_shape = [1, 3] self.assertEqual(list(output.numpy().shape), expected_shape) expected_tensor = tf.constant([[-0.9469, 0.3913, 0.5118]]) self.assertTrue(numpy.allclose(output.numpy(), expected_tensor.numpy(), atol=1e-4))
transformers/tests/models/roberta/test_modeling_tf_roberta.py/0
{ "file_path": "transformers/tests/models/roberta/test_modeling_tf_roberta.py", "repo_id": "transformers", "token_count": 12677 }
451
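The various past_key_values checks in the TF RoBERTa tester above all follow one recipe: run a forward pass with use_cache=True, then verify that feeding only the new token plus the cache reproduces the hidden states of a full-sequence pass. The sketch below illustrates that recipe with a small randomly initialized config; the exact dimensions and token ids are illustrative, not taken from the tester.

import tensorflow as tf

from transformers import RobertaConfig, TFRobertaForCausalLM

config = RobertaConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4,
    intermediate_size=37, is_decoder=True,
)
model = TFRobertaForCausalLM(config=config)

# Token id 1 is the RoBERTa embeddings' padding_idx, so keep it out of the inputs
# (the tests above take the same precaution with tf.where).
input_ids = tf.constant([[5, 6, 7, 8]], dtype=tf.int32)
next_token = tf.constant([[9]], dtype=tf.int32)

# First pass returns past_key_values when use_cache=True.
outputs = model(input_ids, use_cache=True)
past_key_values = outputs.past_key_values

full = model(tf.concat([input_ids, next_token], axis=-1), output_hidden_states=True).hidden_states[0]
cached = model(next_token, past_key_values=past_key_values, output_hidden_states=True).hidden_states[0]

# The last position of the full pass must match the single cached step.
tf.debugging.assert_near(cached[:, 0, :], full[:, -1, :], rtol=1e-6)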
# coding = utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch RT_DETR model.""" import inspect import math import tempfile import unittest from parameterized import parameterized from transformers import ( RTDetrConfig, RTDetrImageProcessor, RTDetrResNetConfig, is_torch_available, is_vision_available, ) from transformers.testing_utils import require_torch, require_torch_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import RTDetrForObjectDetection, RTDetrModel if is_vision_available(): from PIL import Image CHECKPOINT = "PekingU/rtdetr_r50vd" # TODO: replace class RTDetrModelTester: def __init__( self, parent, batch_size=3, is_training=True, use_labels=True, n_targets=3, num_labels=10, initializer_range=0.02, layer_norm_eps=1e-5, batch_norm_eps=1e-5, # backbone backbone_config=None, # encoder HybridEncoder encoder_hidden_dim=32, encoder_in_channels=[128, 256, 512], feat_strides=[8, 16, 32], encoder_layers=1, encoder_ffn_dim=64, encoder_attention_heads=2, dropout=0.0, activation_dropout=0.0, encode_proj_layers=[2], positional_encoding_temperature=10000, encoder_activation_function="gelu", activation_function="silu", eval_size=None, normalize_before=False, # decoder RTDetrTransformer d_model=32, num_queries=30, decoder_in_channels=[32, 32, 32], decoder_ffn_dim=64, num_feature_levels=3, decoder_n_points=4, decoder_layers=2, decoder_attention_heads=2, decoder_activation_function="relu", attention_dropout=0.0, num_denoising=0, label_noise_ratio=0.5, box_noise_scale=1.0, learn_initial_query=False, anchor_image_size=None, image_size=64, disable_custom_kernels=True, with_box_refine=True, ): self.parent = parent self.batch_size = batch_size self.num_channels = 3 self.is_training = is_training self.use_labels = use_labels self.n_targets = n_targets self.num_labels = num_labels self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.batch_norm_eps = batch_norm_eps self.backbone_config = backbone_config self.encoder_hidden_dim = encoder_hidden_dim self.encoder_in_channels = encoder_in_channels self.feat_strides = feat_strides self.encoder_layers = encoder_layers self.encoder_ffn_dim = encoder_ffn_dim self.encoder_attention_heads = encoder_attention_heads self.dropout = dropout self.activation_dropout = activation_dropout self.encode_proj_layers = encode_proj_layers self.positional_encoding_temperature = positional_encoding_temperature self.encoder_activation_function = encoder_activation_function self.activation_function = activation_function self.eval_size = eval_size self.normalize_before = normalize_before self.d_model = d_model self.num_queries = num_queries self.decoder_in_channels = 
decoder_in_channels self.decoder_ffn_dim = decoder_ffn_dim self.num_feature_levels = num_feature_levels self.decoder_n_points = decoder_n_points self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.decoder_activation_function = decoder_activation_function self.attention_dropout = attention_dropout self.num_denoising = num_denoising self.label_noise_ratio = label_noise_ratio self.box_noise_scale = box_noise_scale self.learn_initial_query = learn_initial_query self.anchor_image_size = anchor_image_size self.image_size = image_size self.disable_custom_kernels = disable_custom_kernels self.with_box_refine = with_box_refine self.encoder_seq_length = math.ceil(self.image_size / 32) * math.ceil(self.image_size / 32) def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device) labels = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) labels = [] for i in range(self.batch_size): target = {} target["class_labels"] = torch.randint( high=self.num_labels, size=(self.n_targets,), device=torch_device ) target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device) labels.append(target) config = self.get_config() config.num_labels = self.num_labels return config, pixel_values, pixel_mask, labels def get_config(self): hidden_sizes = [10, 20, 30, 40] backbone_config = RTDetrResNetConfig( embeddings_size=10, hidden_sizes=hidden_sizes, depths=[1, 1, 2, 1], out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], ) return RTDetrConfig.from_backbone_configs( backbone_config=backbone_config, encoder_hidden_dim=self.encoder_hidden_dim, encoder_in_channels=hidden_sizes[1:], feat_strides=self.feat_strides, encoder_layers=self.encoder_layers, encoder_ffn_dim=self.encoder_ffn_dim, encoder_attention_heads=self.encoder_attention_heads, dropout=self.dropout, activation_dropout=self.activation_dropout, encode_proj_layers=self.encode_proj_layers, positional_encoding_temperature=self.positional_encoding_temperature, encoder_activation_function=self.encoder_activation_function, activation_function=self.activation_function, eval_size=self.eval_size, normalize_before=self.normalize_before, d_model=self.d_model, num_queries=self.num_queries, decoder_in_channels=self.decoder_in_channels, decoder_ffn_dim=self.decoder_ffn_dim, num_feature_levels=self.num_feature_levels, decoder_n_points=self.decoder_n_points, decoder_layers=self.decoder_layers, decoder_attention_heads=self.decoder_attention_heads, decoder_activation_function=self.decoder_activation_function, attention_dropout=self.attention_dropout, num_denoising=self.num_denoising, label_noise_ratio=self.label_noise_ratio, box_noise_scale=self.box_noise_scale, learn_initial_query=self.learn_initial_query, anchor_image_size=self.anchor_image_size, image_size=self.image_size, disable_custom_kernels=self.disable_custom_kernels, with_box_refine=self.with_box_refine, ) def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict def create_and_check_rt_detr_model(self, config, pixel_values, pixel_mask, labels): model = RTDetrModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = 
model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.num_queries, self.d_model)) def create_and_check_rt_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels): model = RTDetrForObjectDetection(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) @require_torch class RTDetrModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (RTDetrModel, RTDetrForObjectDetection) if is_torch_available() else () pipeline_model_mapping = ( {"image-feature-extraction": RTDetrModel, "object-detection": RTDetrForObjectDetection} if is_torch_available() else {} ) is_encoder_decoder = True test_torchscript = False test_pruning = False test_head_masking = False test_missing_keys = False # special case for head models def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "RTDetrForObjectDetection": labels = [] for i in range(self.model_tester.batch_size): target = {} target["class_labels"] = torch.ones( size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long ) target["boxes"] = torch.ones( self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float ) labels.append(target) inputs_dict["labels"] = labels return inputs_dict def setUp(self): self.model_tester = RTDetrModelTester(self) self.config_tester = ConfigTester( self, config_class=RTDetrConfig, has_text_modality=False, common_properties=["hidden_size", "num_attention_heads"], ) def test_config(self): self.config_tester.run_common_tests() def test_rt_detr_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_rt_detr_model(*config_and_inputs) def test_rt_detr_object_detection_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_rt_detr_object_detection_head_model(*config_and_inputs) @unittest.skip(reason="RTDetr does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="RTDetr does not use test_inputs_embeds_matches_input_ids") def test_inputs_embeds_matches_input_ids(self): pass @unittest.skip(reason="RTDetr does not support input and output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="RTDetr does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="RTDetr does not use token embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: 
inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.encoder_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.encoder_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [ self.model_tester.encoder_attention_heads, self.model_tester.encoder_seq_length, self.model_tester.encoder_seq_length, ], ) out_len = len(outputs) correct_outlen = 13 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Object Detection model returns pred_logits and pred_boxes if model_class.__name__ == "RTDetrForObjectDetection": correct_outlen += 2 self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.decoder_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [ self.model_tester.decoder_attention_heads, self.model_tester.num_queries, self.model_tester.num_queries, ], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.decoder_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.decoder_attention_heads, self.model_tester.num_feature_levels, self.model_tester.decoder_n_points, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types else: # RTDetr should maintin encoder_hidden_states output added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions self.assertEqual(len(self_attentions), self.model_tester.encoder_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [ self.model_tester.encoder_attention_heads, self.model_tester.encoder_seq_length, self.model_tester.encoder_seq_length, ], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", len(self.model_tester.encoder_in_channels) - 1 ) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[1].shape[-2:]), [ self.model_tester.image_size // self.model_tester.feat_strides[-1], 
self.model_tester.image_size // self.model_tester.feat_strides[-1], ], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.decoder_layers + 1 ) self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.num_queries, self.model_tester.d_model], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) # we take the first output since last_hidden_state is the first item output = outputs[0] encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_attentions = outputs.encoder_attentions[0] encoder_hidden_states.retain_grad() encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_different_timm_backbone(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # let's pick a random timm backbone config.backbone = "tf_mobilenetv3_small_075" config.backbone_config = None config.use_timm_backbone = True config.backbone_kwargs = {"out_indices": [2, 3, 4]} for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "RTDetrForObjectDetection": expected_shape = ( self.model_tester.batch_size, self.model_tester.num_queries, self.model_tester.num_labels, ) self.assertEqual(outputs.logits.shape, expected_shape) # Confirm out_indices was propogated to backbone self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3) else: # Confirm out_indices was propogated to backbone self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3) self.assertTrue(outputs) def test_hf_backbone(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Load a pretrained HF checkpoint as backbone config.backbone = "microsoft/resnet-18" config.backbone_config = None 
config.use_timm_backbone = False config.use_pretrained_backbone = True config.backbone_kwargs = {"out_indices": [2, 3, 4]} for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "RTDetrForObjectDetection": expected_shape = ( self.model_tester.batch_size, self.model_tester.num_queries, self.model_tester.num_labels, ) self.assertEqual(outputs.logits.shape, expected_shape) # Confirm out_indices was propogated to backbone self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3) else: # Confirm out_indices was propogated to backbone self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3) self.assertTrue(outputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) configs_no_init.initializer_bias_prior_prob = 0.2 bias_value = -1.3863 # log_e ((1 - 0.2) / 0.2) failed_cases = [] for model_class in self.all_model_classes: model = model_class(config=configs_no_init) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "RTDetrConvEncoder": backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if ("class_embed" in name and "bias" in name) or "enc_score_head.bias" in name: bias_tensor = torch.full_like(param.data, bias_value) if not torch.allclose(param.data, bias_tensor, atol=1e-4): failed_cases.append( f"Parameter {name} of model {model_class} seems not properly initialized. " f"Biases should be initialized to {bias_value}, got {param.data}" ) elif ( "level_embed" in name or "sampling_offsets.bias" in name or "value_proj" in name or "output_proj" in name or "reference_points" in name or "enc_score_head.weight" in name or ("class_embed" in name and "weight" in name) or name in backbone_params ): continue else: mean = param.data.mean() round_mean = (mean * 1e9).round() / 1e9 round_mean = round_mean.item() if round_mean not in [0.0, 1.0]: failed_cases.append( f"Parameter {name} of model {model_class} seems not properly initialized. 
" f"Mean is {round_mean}, but should be in [0, 1]" ) message = "\n" + "\n".join(failed_cases) self.assertTrue(not failed_cases, message) @parameterized.expand(["float32", "float16", "bfloat16"]) @require_torch_gpu @slow def test_inference_with_different_dtypes(self, torch_dtype_str): torch_dtype = { "float32": torch.float32, "float16": torch.float16, "bfloat16": torch.bfloat16, }[torch_dtype_str] config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device).to(torch_dtype) model.eval() for key, tensor in inputs_dict.items(): if tensor.dtype == torch.float32: inputs_dict[key] = tensor.to(torch_dtype) with torch.no_grad(): _ = model(**self._prepare_for_class(inputs_dict, model_class)) @parameterized.expand(["float32", "float16", "bfloat16"]) @require_torch_gpu @slow def test_inference_equivalence_for_static_and_dynamic_anchors(self, torch_dtype_str): torch_dtype = { "float32": torch.float32, "float16": torch.float16, "bfloat16": torch.bfloat16, }[torch_dtype_str] config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() h, w = inputs_dict["pixel_values"].shape[-2:] # convert inputs to the desired dtype for key, tensor in inputs_dict.items(): if tensor.dtype == torch.float32: inputs_dict[key] = tensor.to(torch_dtype) for model_class in self.all_model_classes: with tempfile.TemporaryDirectory() as tmpdirname: model_class(config).save_pretrained(tmpdirname) model_static = model_class.from_pretrained( tmpdirname, anchor_image_size=[h, w], device_map=torch_device, torch_dtype=torch_dtype ).eval() model_dynamic = model_class.from_pretrained( tmpdirname, anchor_image_size=None, device_map=torch_device, torch_dtype=torch_dtype ).eval() self.assertIsNotNone(model_static.config.anchor_image_size) self.assertIsNone(model_dynamic.config.anchor_image_size) with torch.no_grad(): outputs_static = model_static(**self._prepare_for_class(inputs_dict, model_class)) outputs_dynamic = model_dynamic(**self._prepare_for_class(inputs_dict, model_class)) self.assertTrue( torch.allclose( outputs_static.last_hidden_state, outputs_dynamic.last_hidden_state, rtol=1e-4, atol=1e-4 ), f"Max diff: {(outputs_static.last_hidden_state - outputs_dynamic.last_hidden_state).abs().max()}", ) TOLERANCE = 1e-4 # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class RTDetrModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return RTDetrImageProcessor.from_pretrained(CHECKPOINT) if is_vision_available() else None def test_inference_object_detection_head(self): model = RTDetrForObjectDetection.from_pretrained(CHECKPOINT).to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape_logits = torch.Size((1, 300, model.config.num_labels)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_logits = torch.tensor( [ [-4.64763879776001, -5.001153945922852, -4.978509902954102], [-4.159348487854004, -4.703853607177734, -5.946484565734863], [-4.437461853027344, -4.65836238861084, -6.235235691070557], ] ).to(torch_device) expected_boxes = torch.tensor( [ [0.1688060760498047, 0.19992263615131378, 0.21225441992282867], [0.768376350402832, 0.41226309537887573, 
0.4636859893798828], [0.25953856110572815, 0.5483334064483643, 0.4777486026287079], ] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)) expected_shape_boxes = torch.Size((1, 300, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)) # verify postprocessing results = image_processor.post_process_object_detection( outputs, threshold=0.0, target_sizes=[image.size[::-1]] )[0] expected_scores = torch.tensor( [0.9703017473220825, 0.9599503874778748, 0.9575679302215576, 0.9506784677505493], device=torch_device ) expected_labels = [57, 15, 15, 65] expected_slice_boxes = torch.tensor( [ [0.13774872, 0.37821293, 640.13074, 476.21088], [343.38132, 24.276838, 640.1404, 371.49573], [13.225126, 54.179348, 318.98422, 472.2207], [40.114475, 73.44104, 175.9573, 118.48469], ], device=torch_device, ) self.assertTrue(torch.allclose(results["scores"][:4], expected_scores, atol=1e-4)) self.assertSequenceEqual(results["labels"][:4].tolist(), expected_labels) self.assertTrue(torch.allclose(results["boxes"][:4], expected_slice_boxes, atol=1e-4))
transformers/tests/models/rt_detr/test_modeling_rt_detr.py/0
{ "file_path": "transformers/tests/models/rt_detr/test_modeling_rt_detr.py", "repo_id": "transformers", "token_count": 15214 }
452
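The integration test above exercises RT-DETR end to end: preprocess with RTDetrImageProcessor, run RTDetrForObjectDetection, then post-process the raw logits and normalized boxes into pixel-space detections. A minimal standalone sketch of that flow follows; the checkpoint string is a placeholder for whatever the suite's CHECKPOINT constant points to (not a verified hub id), and the 0.5 threshold is an arbitrary choice where the test uses 0.0 to keep every query.

import torch
from PIL import Image
from transformers import RTDetrForObjectDetection, RTDetrImageProcessor

checkpoint = "<an-rt-detr-checkpoint>"  # placeholder for the suite's CHECKPOINT constant
device = "cuda" if torch.cuda.is_available() else "cpu"

image_processor = RTDetrImageProcessor.from_pretrained(checkpoint)
model = RTDetrForObjectDetection.from_pretrained(checkpoint).to(device).eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="pt").to(device)

with torch.no_grad():
    outputs = model(**inputs)

# logits: (batch, 300 queries, num_labels); pred_boxes: (batch, 300, 4), normalized to [0, 1].
# Post-processing converts them to absolute-pixel (x_min, y_min, x_max, y_max) boxes.
results = image_processor.post_process_object_detection(
    outputs, threshold=0.5, target_sizes=[image.size[::-1]]
)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(f"label={label.item()}  score={score.item():.3f}  box={[round(v, 1) for v in box.tolist()]}")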
# coding=utf-8 # Copyright 2021 HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_deterministic_for_xpu, require_torch, slow, torch_device from ...test_modeling_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_bert import BertModelTester from ..speech_to_text.test_modeling_speech_to_text import Speech2TextModelTester from ..wav2vec2.test_modeling_wav2vec2 import Wav2Vec2ModelTester if is_torch_available(): import numpy as np import torch from transformers import ( BertLMHeadModel, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, Wav2Vec2Model, ) from transformers.modeling_outputs import BaseModelOutput from transformers.models.speech_to_text.modeling_speech_to_text import Speech2TextEncoder @require_torch class EncoderDecoderMixin: def get_encoder_decoder_model(self, config, decoder_config): pass def prepare_config_and_inputs(self): pass def get_pretrained_model_and_inputs(self): pass def check_encoder_decoder_model_from_pretrained_configs( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, input_values=None, input_features=None, **kwargs, ): encoder_decoder_config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) self.assertTrue(encoder_decoder_config.decoder.is_decoder) enc_dec_model = SpeechEncoderDecoderModel(encoder_decoder_config) enc_dec_model.to(torch_device) enc_dec_model.eval() self.assertTrue(enc_dec_model.config.is_encoder_decoder) self.assertFalse(enc_dec_model.config.tie_word_embeddings) outputs_encoder_decoder = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, input_values=None, input_features=None, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) self.assertTrue(enc_dec_model.config.decoder.is_decoder) self.assertTrue(enc_dec_model.config.decoder.add_cross_attention) self.assertTrue(enc_dec_model.config.is_encoder_decoder) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) encoder_outputs = BaseModelOutput(last_hidden_state=outputs_encoder_decoder.encoder_hidden_states[-1]) 
outputs_encoder_decoder = enc_dec_model( encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model_with_inputs( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, input_values=None, input_features=None, **kwargs, ): inputs = input_values if input_features is None else input_features encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( inputs, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) outputs_encoder_decoder_kwarg = enc_dec_model( inputs=inputs, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, ) self.assertEqual( outputs_encoder_decoder_kwarg["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model_from_pretrained( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, return_dict, input_values=None, input_features=None, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model, "return_dict": return_dict} enc_dec_model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, return_dict=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_save_and_load( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, input_values=None, input_features=None, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) enc_dec_model.eval() with torch.no_grad(): outputs = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) out_2 = outputs[0].cpu().numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: enc_dec_model.save_pretrained(tmpdirname) enc_dec_model = SpeechEncoderDecoderModel.from_pretrained(tmpdirname) enc_dec_model.to(torch_device) after_outputs = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) out_1 = after_outputs[0].cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def 
check_save_and_load_encoder_decoder_model( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, input_values=None, input_features=None, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) enc_dec_model.eval() with torch.no_grad(): outputs = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) out_2 = outputs[0].cpu().numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as encoder_tmp_dirname, tempfile.TemporaryDirectory() as decoder_tmp_dirname: enc_dec_model.encoder.save_pretrained(encoder_tmp_dirname) enc_dec_model.decoder.save_pretrained(decoder_tmp_dirname) SpeechEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=encoder_tmp_dirname, decoder_pretrained_model_name_or_path=decoder_tmp_dirname, ) after_outputs = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) out_1 = after_outputs[0].cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def check_encoder_decoder_model_output_attentions( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, labels=None, input_values=None, input_features=None, **kwargs, ): # make the decoder inputs a different shape from the encoder inputs to harden the test decoder_input_ids = decoder_input_ids[:, :-1] decoder_attention_mask = decoder_attention_mask[:, :-1] encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_attentions=True, ) inputs = input_values if input_features is None else input_features encoder_attentions = outputs_encoder_decoder["encoder_attentions"] self.assertEqual(len(encoder_attentions), config.num_hidden_layers) seq_len = enc_dec_model.encoder._get_feat_extract_output_lengths(inputs.shape[1]) self.assertEqual(encoder_attentions[0].shape[-3:], (config.num_attention_heads, seq_len, seq_len)) decoder_attentions = outputs_encoder_decoder["decoder_attentions"] num_decoder_layers = ( decoder_config.num_decoder_layers if hasattr(decoder_config, "num_decoder_layers") else decoder_config.num_hidden_layers ) self.assertEqual(len(decoder_attentions), num_decoder_layers) self.assertEqual( decoder_attentions[0].shape[-3:], (decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]), ) cross_attentions = outputs_encoder_decoder["cross_attentions"] self.assertEqual(len(cross_attentions), num_decoder_layers) cross_attention_input_seq_len = decoder_input_ids.shape[-1] self.assertEqual( cross_attentions[0].shape[-3:], (decoder_config.num_attention_heads, cross_attention_input_seq_len, seq_len), ) def check_encoder_decoder_model_generate( self, config, decoder_config, input_values=None, input_features=None, **kwargs ): encoder_model, 
decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) # make sure EOS token is set to None to prevent early stopping of generation if hasattr(enc_dec_model.config, "eos_token_id"): enc_dec_model.config.eos_token_id = None if hasattr(enc_dec_model.config, "decoder") and hasattr(enc_dec_model.config.decoder, "eos_token_id"): enc_dec_model.config.decoder.eos_token_id = None if hasattr(enc_dec_model.generation_config, "eos_token_id"): enc_dec_model.generation_config.eos_token_id = None inputs = input_values if input_features is None else input_features # Bert does not have a bos token id, so use pad_token_id instead generated_output = enc_dec_model.generate( inputs, decoder_start_token_id=enc_dec_model.config.decoder.pad_token_id ) self.assertEqual(generated_output.shape, (inputs.shape[0],) + (decoder_config.max_length,)) def test_encoder_decoder_model(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model(**input_ids_dict) def test_encoder_decoder_model_with_inputs(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_with_inputs(**input_ids_dict) def test_encoder_decoder_model_from_pretrained_configs(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained_configs(**input_ids_dict) def test_encoder_decoder_model_from_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=False) def test_encoder_decoder_model_from_pretrained_return_dict(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=True) def test_save_and_load_from_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_save_and_load(**input_ids_dict) def test_save_and_load_from_encoder_decoder_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_save_and_load_encoder_decoder_model(**input_ids_dict) def test_encoder_decoder_model_output_attentions(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_output_attentions(**input_ids_dict) def test_encoder_decoder_model_generate(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_generate(**input_ids_dict) def test_training_gradient_checkpointing(self): inputs_dict = self.prepare_config_and_inputs() encoder_model, decoder_model = self.get_encoder_decoder_model( inputs_dict["config"], inputs_dict["decoder_config"] ) model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) model.to(torch_device) model.train() model.gradient_checkpointing_enable() model.config.decoder_start_token_id = 0 model.config.pad_token_id = 0 model_inputs = { "attention_mask": inputs_dict["attention_mask"], "labels": inputs_dict["labels"], "decoder_input_ids": inputs_dict["decoder_input_ids"], } inputs = inputs_dict["input_features"] if "input_features" in inputs_dict else inputs_dict["input_values"] loss = model(inputs, **model_inputs).loss loss.backward() @slow @require_deterministic_for_xpu def test_real_model_save_load_from_pretrained(self): model_2, inputs = self.get_pretrained_model_and_inputs() model_2.to(torch_device) with torch.no_grad(): outputs = model_2(**inputs) out_2 = outputs[0].cpu().numpy() out_2[np.isnan(out_2)] = 0 with 
tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 = SpeechEncoderDecoderModel.from_pretrained(tmp_dirname) model_1.to(torch_device) after_outputs = model_1(**inputs) out_1 = after_outputs[0].cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @require_torch class Wav2Vec2BertModelTest(EncoderDecoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained( "facebook/wav2vec2-base-960h", "google-bert/bert-base-cased" ) batch_size = 13 input_values = floats_tensor([batch_size, 512], scale=1.0) attention_mask = random_attention_mask([batch_size, 512]) decoder_input_ids = ids_tensor([batch_size, 4], model.decoder.config.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs = { "input_values": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return model, inputs def get_encoder_decoder_model(self, config, decoder_config): encoder_model = Wav2Vec2Model(config).eval() decoder_model = BertLMHeadModel(decoder_config).eval() return encoder_model, decoder_model def prepare_config_and_inputs(self): bert_model_tester = BertModelTester(self) wav2vec2_model_tester = Wav2Vec2ModelTester(self) encoder_config_and_inputs = wav2vec2_model_tester.prepare_config_and_inputs() decoder_config_and_inputs = bert_model_tester.prepare_config_and_inputs_for_decoder() ( config, input_values, input_mask, ) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_token_type_ids, decoder_input_mask, decoder_sequence_labels, decoder_token_labels, decoder_choice_labels, encoder_attention_mask, _, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True return { "config": config, "input_values": input_values, "attention_mask": input_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_token_type_ids": decoder_token_type_ids, "decoder_attention_mask": decoder_input_mask, "decoder_sequence_labels": decoder_sequence_labels, "decoder_token_labels": decoder_token_labels, "decoder_choice_labels": decoder_choice_labels, "labels": decoder_token_labels, } @require_torch class Speech2TextBertModelTest(EncoderDecoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained( "facebook/s2t-small-librispeech-asr", "google-bert/bert-base-cased" ) batch_size = 13 input_features = floats_tensor([batch_size, 7, 80], scale=1.0) attention_mask = random_attention_mask([batch_size, 7]) decoder_input_ids = ids_tensor([batch_size, 4], model.decoder.config.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs = { "input_features": input_features, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return model, inputs def get_encoder_decoder_model(self, config, decoder_config): encoder_model = Speech2TextEncoder(config).eval() decoder_model = BertLMHeadModel(decoder_config).eval() return encoder_model, decoder_model def prepare_config_and_inputs(self): bert_model_tester = BertModelTester(self) speech2text_model_tester = Speech2TextModelTester(self) encoder_config_and_inputs = speech2text_model_tester.prepare_config_and_inputs() decoder_config_and_inputs = 
bert_model_tester.prepare_config_and_inputs_for_decoder() config, inputs = encoder_config_and_inputs input_features = inputs["input_features"] input_mask = inputs["attention_mask"] ( decoder_config, decoder_input_ids, decoder_token_type_ids, decoder_input_mask, decoder_sequence_labels, decoder_token_labels, decoder_choice_labels, encoder_attention_mask, _, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True return { "config": config, "input_features": input_features, "attention_mask": input_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_token_type_ids": decoder_token_type_ids, "decoder_attention_mask": decoder_input_mask, "decoder_sequence_labels": decoder_sequence_labels, "decoder_token_labels": decoder_token_labels, "decoder_choice_labels": decoder_choice_labels, "labels": decoder_token_labels, } @unittest.skip(reason="Cannot save full model as Speech2TextModel != Speech2TextEncoder") def test_encoder_decoder_model_from_pretrained_configs(self): pass @unittest.skip(reason="Cannot save full model as Speech2TextModel != Speech2TextEncoder") def test_save_and_load_from_pretrained(self): pass @require_deterministic_for_xpu @unittest.skip(reason="Cannot save full model as Speech2TextModel != Speech2TextEncoder") def test_real_model_save_load_from_pretrained(self): pass
transformers/tests/models/speech_encoder_decoder/test_modeling_speech_encoder_decoder.py/0
{ "file_path": "transformers/tests/models/speech_encoder_decoder/test_modeling_speech_encoder_decoder.py", "repo_id": "transformers", "token_count": 11053 }
453
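Outside the test harness, the composition pattern these tests exercise looks like the short sketch below: pair a pretrained speech encoder with a pretrained text decoder via from_encoder_decoder_pretrained and run generation. The one-second random waveform only keeps the snippet self-contained, and the newly added cross-attention weights are randomly initialized, so the generated ids are meaningless until the combined model is fine-tuned.

import torch
from transformers import SpeechEncoderDecoderModel

model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    "facebook/wav2vec2-base-960h", "google-bert/bert-base-cased"
)
model.eval()

# Wav2Vec2 consumes raw waveform values; random noise stands in for one second of 16 kHz speech.
input_values = torch.randn(1, 16000)

# BERT has no BOS token, so decoding starts from the pad token id, exactly as the generate test does.
generated_ids = model.generate(
    input_values,
    decoder_start_token_id=model.config.decoder.pad_token_id,
    max_length=10,
)
print(generated_ids.shape)  # (1, <= 10)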
# coding=utf-8 # Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from transformers import SqueezeBertTokenizer, SqueezeBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class SqueezeBertTokenizationTest(BertTokenizationTest): tokenizer_class = SqueezeBertTokenizer rust_tokenizer_class = SqueezeBertTokenizerFast test_rust_tokenizer = True from_pretrained_id = "squeezebert/squeezebert-uncased" def get_rust_tokenizer(self, **kwargs): return SqueezeBertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs) @slow def test_sequence_builders(self): tokenizer = SqueezeBertTokenizer.from_pretrained("squeezebert/squeezebert-mnli-headless") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [ tokenizer.sep_token_id ]
transformers/tests/models/squeezebert/test_tokenization_squeezebert.py/0
{ "file_path": "transformers/tests/models/squeezebert/test_tokenization_squeezebert.py", "repo_id": "transformers", "token_count": 669 }
454
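The sequence-builder assertion above boils down to the standard BERT-style special-token layout. A tiny sketch that makes the layout visible by decoding the built inputs rather than only comparing id lists:

from transformers import SqueezeBertTokenizer

tokenizer = SqueezeBertTokenizer.from_pretrained("squeezebert/squeezebert-mnli-headless")

text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

single = tokenizer.build_inputs_with_special_tokens(text)
pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

# Expected layout: "[CLS] ... [SEP]" for a single sequence, "[CLS] ... [SEP] ... [SEP]" for a pair.
print(tokenizer.decode(single))
print(tokenizer.decode(pair))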
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Swin2SR model.""" import unittest from transformers import Swin2SRConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import Swin2SRForImageSuperResolution, Swin2SRModel if is_vision_available(): from PIL import Image from transformers import Swin2SRImageProcessor class Swin2SRModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=1, num_channels=3, num_channels_out=1, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=False, upscale=2, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_channels_out = num_channels_out self.embed_dim = embed_dim self.depths = depths self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.patch_norm = patch_norm self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.is_training = is_training self.scope = scope self.use_labels = use_labels self.upscale = upscale # here we set some attributes to make tests pass self.num_hidden_layers = len(depths) self.hidden_size = embed_dim self.seq_length = (image_size // patch_size) ** 2 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return Swin2SRConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_channels_out=self.num_channels_out, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, 
hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, upscale=self.upscale, ) def create_and_check_model(self, config, pixel_values, labels): model = Swin2SRModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.embed_dim, self.image_size, self.image_size) ) def create_and_check_for_image_super_resolution(self, config, pixel_values, labels): model = Swin2SRForImageSuperResolution(config) model.to(torch_device) model.eval() result = model(pixel_values) expected_image_size = self.image_size * self.upscale self.parent.assertEqual( result.reconstruction.shape, (self.batch_size, self.num_channels_out, expected_image_size, expected_image_size), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class Swin2SRModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Swin2SRModel, Swin2SRForImageSuperResolution) if is_torch_available() else () pipeline_model_mapping = ( {"image-feature-extraction": Swin2SRModel, "image-to-image": Swin2SRForImageSuperResolution} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_torchscript = False def setUp(self): self.model_tester = Swin2SRModelTester(self) self.config_tester = ConfigTester( self, config_class=Swin2SRConfig, embed_dim=37, has_text_modality=False, common_properties=["image_size", "patch_size", "num_channels"], ) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_for_image_super_resolution(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_super_resolution(*config_and_inputs) # TODO: check if this works again for PyTorch 2.x.y @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.") def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip(reason="Swin2SR does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Swin2SR does not support training yet") def test_training(self): pass @unittest.skip(reason="Swin2SR does not support training yet") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) @slow def 
test_model_from_pretrained(self): model_name = "caidas/swin2SR-classical-sr-x2-64" model = Swin2SRModel.from_pretrained(model_name) self.assertIsNotNone(model) # overwriting because of `logit_scale` parameter def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if "logit_scale" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = len(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True window_size_squared = config.window_size**2 model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) @require_vision @require_torch @slow class Swin2SRModelIntegrationTest(unittest.TestCase): def test_inference_image_super_resolution_head(self): processor = Swin2SRImageProcessor() model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-classical-sr-x2-64").to(torch_device) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size([1, 3, 976, 1296]) self.assertEqual(outputs.reconstruction.shape, expected_shape) expected_slice = torch.tensor( [[0.5458, 0.5546, 0.5638], [0.5526, 0.5565, 0.5651], [0.5396, 0.5426, 0.5621]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-4)) def test_inference_fp16(self): processor = Swin2SRImageProcessor() model = Swin2SRForImageSuperResolution.from_pretrained( "caidas/swin2SR-classical-sr-x2-64", torch_dtype=torch.float16 ).to(torch_device) image = 
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor(images=image, return_tensors="pt").to(model.dtype).to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size([1, 3, 976, 1296]) self.assertEqual(outputs.reconstruction.shape, expected_shape) expected_slice = torch.tensor( [[0.5454, 0.5542, 0.5640], [0.5518, 0.5562, 0.5649], [0.5391, 0.5425, 0.5620]], dtype=model.dtype ).to(torch_device) self.assertTrue(torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-4))
transformers/tests/models/swin2sr/test_modeling_swin2sr.py/0
{ "file_path": "transformers/tests/models/swin2sr/test_modeling_swin2sr.py", "repo_id": "transformers", "token_count": 6141 }
455
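The integration tests above only check a 3x3 slice of the reconstruction tensor; turning that tensor back into a viewable image takes one extra step. A hedged sketch, assuming the reconstruction values are meant to be read as RGB intensities in roughly [0, 1] (consistent with the expected slices in the tests):

import numpy as np
import torch
from PIL import Image
from transformers import Swin2SRForImageSuperResolution, Swin2SRImageProcessor

processor = Swin2SRImageProcessor()
model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-classical-sr-x2-64").eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# reconstruction: (1, 3, out_H, out_W) floats, roughly 2x the (padded) input size.
# Convert to (out_H, out_W, 3) uint8 so it can be saved as a regular image file.
upscaled = outputs.reconstruction.squeeze(0).clamp(0.0, 1.0).permute(1, 2, 0).numpy()
Image.fromarray((upscaled * 255.0).round().astype(np.uint8)).save("upscaled.png")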
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import os import pickle import tempfile import unittest from transformers import UMT5Config, is_torch_available from transformers.models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from transformers.utils import is_torch_fx_available from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_fx_available(): from transformers.utils.fx import symbolic_trace if is_torch_available(): import torch from transformers import ( AutoTokenizer, UMT5EncoderModel, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5ForSequenceClassification, UMT5ForTokenClassification, UMT5Model, ) # Copied from test.models.t5.test_modeling_t5.T5ModelTester with T5->UMT5 class UMT5ModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=7, # For common tests is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers def get_large_model_config(self): return UMT5Config.from_pretrained("google/umt5-base") def prepare_inputs_dict( self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.num_decoder_layers, 
config.num_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones( config.num_decoder_layers, config.num_attention_heads, device=torch_device ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input input_ids = input_ids.clamp(self.pad_token_id + 2) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1) config = self.get_config() config.encoder_attention_heads = config.num_attention_heads input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids) return config, input_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_pipeline_config(self): return UMT5Config( vocab_size=166, # t5 forces 100 extra tokens d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def get_config(self): return UMT5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = UMT5Model(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, 
self.decoder_seq_length, self.hidden_size)) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(decoder_past), config.num_layers) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0]), 4) def create_and_check_decoder_model_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = UMT5Model(config=config).get_decoder().to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_model_fp16_forward( self, config, input_dict, ): model = UMT5Model(config=config).to(torch_device).half().eval() output = model(**input_dict)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) def create_and_check_with_sequence_classification_head( self, config, input_dict, ): labels = torch.tensor([1] * self.batch_size, dtype=torch.long, device=torch_device) model = UMT5ForSequenceClassification(config=config).to(torch_device).eval() outputs = model(**input_dict, labels=labels) # self.parent.assertEqual(len(outputs), 4) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, config.num_labels)) self.parent.assertEqual(outputs["loss"].size(), ()) @require_torch class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (UMT5Model, UMT5ForConditionalGeneration, UMT5ForSequenceClassification, UMT5ForQuestionAnswering) if is_torch_available() else () ) all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": UMT5Model, "question-answering": UMT5ForQuestionAnswering, "summarization": UMT5ForConditionalGeneration, "text-classification": UMT5ForSequenceClassification, "text2text-generation": UMT5ForConditionalGeneration, "translation": UMT5ForConditionalGeneration, "zero-shot": UMT5ForSequenceClassification, } if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = False test_pruning = False test_missing_keys = True test_torchscript = True # The small UMT5 model needs higher percentages for CPU/MP tests model_split_percents = [0.5, 0.8, 0.9] def setUp(self): self.model_tester = UMT5ModelTester(self) # `QAPipelineTests` is not working well with slow tokenizers (for some models) and we don't want to touch the file # 
`src/transformers/data/processors/squad.py` (where this test fails for this model) def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"): return True return False def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): if not is_torch_fx_available() or not self.fx_compatible: self.skipTest(reason="torch fx is not available or not compatible with this model") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.return_dict = False for model_class in self.all_model_classes: if model_class.__name__ == "UMT5ForSequenceClassification": continue model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) try: if model.config.is_encoder_decoder: model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward labels = inputs.get("labels", None) input_names = [ "attention_mask", "decoder_attention_mask", "decoder_input_ids", "input_features", "input_ids", "input_values", ] if labels is not None: input_names.append("labels") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) model_output = model(**filtered_inputs) traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) else: input_names = [ "attention_mask", "bbox", "input_features", "input_ids", "input_values", "pixel_values", "token_type_ids", "visual_feats", "visual_pos", ] labels = inputs.get("labels", None) start_positions = inputs.get("start_positions", None) end_positions = inputs.get("end_positions", None) if labels is not None: input_names.append("labels") if start_positions is not None: input_names.append("start_positions") if end_positions is not None: input_names.append("end_positions") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) if model.__class__.__name__ in set(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values()) and ( not hasattr(model.config, "problem_type") or model.config.problem_type is None ): model.config.problem_type = "single_label_classification" traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) model_output = model(**filtered_inputs) except Exception as e: self.fail(f"Couldn't trace module: {e}") def flatten_output(output): flatten = [] for x in output: if isinstance(x, (tuple, list)): flatten += flatten_output(x) elif not isinstance(x, torch.Tensor): continue else: flatten.append(x) return flatten model_output = flatten_output(model_output) traced_output = flatten_output(traced_output) num_outputs = len(model_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], traced_output[i]), f"traced {i}th output doesn't match model {i}th output for {model_class}", ) # Test that the model can be serialized and restored properly with tempfile.TemporaryDirectory() as tmp_dir_name: pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") try: with open(pkl_file_name, "wb") as f: pickle.dump(traced_model, f) with open(pkl_file_name, "rb") as f: loaded = pickle.load(f) except Exception as e: self.fail(f"Couldn't serialize / deserialize the traced model: {e}") loaded_output = 
loaded(**filtered_inputs) loaded_output = flatten_output(loaded_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], loaded_output[i]), f"serialized model {i}th output doesn't match model {i}th output for {model_class}", ) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. # (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() # UMT5ForSequenceClassification does not support inputs_embeds def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering): model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] def test_with_sequence_classification_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_sequence_classification_head(*config_and_inputs) @unittest.skip(reason="Test has a segmentation fault on torch 1.8.0") def test_export_to_onnx(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() model = UMT5Model(config_and_inputs[0]).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f"{tmpdirname}/t5_test.onnx", export_params=True, opset_version=9, input_names=["input_ids", "decoder_input_ids"], ) @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) def test_generate_with_head_masking(self): attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"] config_and_inputs = self.model_tester.prepare_config_and_inputs() config = config_and_inputs[0] model = UMT5ForConditionalGeneration(config).eval() model.to(torch_device) head_masking = { "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device), "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device), "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device), } for attn_name, (name, mask) in zip(attention_names, head_masking.items()): head_masks = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": head_masks["decoder_head_mask"] = torch.ones( config.num_decoder_layers, config.num_heads, device=torch_device ) out = model.generate( config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, ) # We check the state of decoder_attentions and cross_attentions just from the last step attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] 
self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0) @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass # Copied from tests.models.t5.test_modeling_t5.T5EncoderOnlyModelTester with T5->UMT5 class UMT5EncoderOnlyModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, # For common tests use_attention_mask=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, is_training=False, dropout_rate=0.1, initializer_factor=0.002, is_encoder_decoder=False, eos_token_id=1, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length # For common tests self.seq_length = self.encoder_seq_length self.use_attention_mask = use_attention_mask self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.is_encoder_decoder = is_encoder_decoder self.scope = None self.is_training = is_training def get_large_model_config(self): return UMT5Config.from_pretrained("google-t5/t5-base") def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) config = UMT5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, ) def create_and_check_model( self, config, input_ids, attention_mask, ): model = UMT5EncoderModel(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, attention_mask=attention_mask, ) result = model(input_ids=input_ids) encoder_output = result.last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) def create_and_check_model_fp16_forward( self, config, input_ids, attention_mask, ): model = UMT5EncoderModel(config=config).to(torch_device).half().eval() output = model(input_ids, attention_mask=attention_mask)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) def create_and_check_with_token_classification_head( self, config, 
input_ids, attention_mask, ): labels = torch.tensor([1] * self.seq_length * self.batch_size, dtype=torch.long, device=torch_device) model = UMT5ForTokenClassification(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, labels=labels, attention_mask=attention_mask, ) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.seq_length, config.num_labels)) self.parent.assertEqual(outputs["loss"].size(), ()) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict # Copied from tests.models.t5.test_modeling_t5.T5EncoderOnlyModelTest with T5->UMT5 class UMT5EncoderOnlyModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (UMT5EncoderModel, UMT5ForTokenClassification) if is_torch_available() else () test_pruning = False test_resize_embeddings = False test_model_parallel = True pipeline_model_mapping = ( { "token-classification": UMT5ForTokenClassification, } if is_torch_available() else {} ) all_parallelizable_model_classes = (UMT5EncoderModel,) if is_torch_available() else () def setUp(self): self.model_tester = UMT5EncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=UMT5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) def test_with_token_classification_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_token_classification_head(*config_and_inputs) @require_torch @require_sentencepiece @require_tokenizers class Umt5IntegrationTest(unittest.TestCase): @slow @unittest.skip( "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. 
Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" ) def test_small_integration_test(self): """ For comparison run the kaggle notbook available here : https://www.kaggle.com/arthurzucker/umt5-inference """ model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device) tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False) input_text = [ "Bonjour monsieur <extra_id_0> bien <extra_id_1>.", "No se como puedo <extra_id_0>.", "This is the reason why we <extra_id_0> them.", "The <extra_id_0> walks in <extra_id_1>, seats", "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.", ] input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids # fmt: off EXPECTED_IDS = torch.tensor( [ [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1], ] ) # fmt: on torch.testing.assert_close(input_ids, EXPECTED_IDS) generated_ids = model.generate(input_ids.to(torch_device)) EXPECTED_FILLING = [ "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>", "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", ] filling = tokenizer.batch_decode(generated_ids) self.assertEqual(filling, EXPECTED_FILLING)
transformers/tests/models/umt5/test_modeling_umt5.py/0
{ "file_path": "transformers/tests/models/umt5/test_modeling_umt5.py", "repo_id": "transformers", "token_count": 15570 }
456
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch VisualBERT model.""" import copy import unittest from transformers import VisualBertConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForRegionToPhraseAlignment, VisualBertForVisualReasoning, VisualBertModel, ) class VisualBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, visual_seq_length=5, is_training=True, use_attention_mask=True, use_visual_attention_mask=True, use_token_type_ids=True, use_visual_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, visual_embedding_dim=20, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.visual_seq_length = visual_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_visual_attention_mask = use_visual_attention_mask self.use_token_type_ids = use_token_type_ids self.use_visual_token_type_ids = use_visual_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.visual_embedding_dim = visual_embedding_dim self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def get_config(self): return VisualBertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, visual_embedding_dim=self.visual_embedding_dim, num_labels=self.num_labels, is_decoder=False, initializer_range=self.initializer_range, ) def 
prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) visual_embeds = floats_tensor([self.batch_size, self.visual_seq_length, self.visual_embedding_dim]) attention_mask = None if self.use_attention_mask: attention_mask = torch.ones((self.batch_size, self.seq_length), dtype=torch.long, device=torch_device) visual_attention_mask = None if self.use_visual_attention_mask: visual_attention_mask = torch.ones( (self.batch_size, self.visual_seq_length), dtype=torch.long, device=torch_device ) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) visual_token_type_ids = None if self.use_visual_token_type_ids: visual_token_type_ids = ids_tensor([self.batch_size, self.visual_seq_length], self.type_vocab_size) config = self.get_config() return config, { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, "visual_embeds": visual_embeds, "visual_token_type_ids": visual_token_type_ids, "visual_attention_mask": visual_attention_mask, } def prepare_config_and_inputs_for_pretraining(self): masked_lm_labels = None sentence_image_labels = None if self.use_labels: masked_lm_labels = ids_tensor([self.batch_size, self.seq_length + self.visual_seq_length], self.vocab_size) sentence_image_labels = ids_tensor( [self.batch_size], self.type_sequence_label_size, ) config, input_dict = self.prepare_config_and_inputs_for_common() input_dict.update({"labels": masked_lm_labels, "sentence_image_labels": sentence_image_labels}) return config, input_dict def prepare_config_and_inputs_for_multiple_choice(self): input_ids = ids_tensor([self.batch_size, self.num_choices, self.seq_length], self.vocab_size) visual_embeds = floats_tensor( [self.batch_size, self.num_choices, self.visual_seq_length, self.visual_embedding_dim] ) attention_mask = None if self.use_attention_mask: attention_mask = torch.ones( (self.batch_size, self.num_choices, self.seq_length), dtype=torch.long, device=torch_device ) visual_attention_mask = None if self.use_visual_attention_mask: visual_attention_mask = torch.ones( (self.batch_size, self.num_choices, self.visual_seq_length), dtype=torch.long, device=torch_device ) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.num_choices, self.seq_length], self.type_vocab_size) visual_token_type_ids = None if self.use_visual_token_type_ids: visual_token_type_ids = ids_tensor( [self.batch_size, self.num_choices, self.visual_seq_length], self.type_vocab_size ) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, "visual_embeds": visual_embeds, "visual_token_type_ids": visual_token_type_ids, "visual_attention_mask": visual_attention_mask, "labels": labels, } def prepare_config_and_inputs_for_vqa(self): vqa_labels = None if self.use_labels: vqa_labels = floats_tensor([self.batch_size, self.num_labels]) config, input_dict = self.prepare_config_and_inputs_for_common() input_dict.update({"labels": vqa_labels}) return config, input_dict def prepare_config_and_inputs_for_nlvr(self): nlvr_labels = None if self.use_labels: nlvr_labels = ids_tensor([self.batch_size], self.num_labels) config, input_dict = self.prepare_config_and_inputs_for_common() input_dict.update({"labels": nlvr_labels}) return config, input_dict def 
prepare_config_and_inputs_for_flickr(self): region_to_phrase_position = torch.cat( ( ids_tensor([self.batch_size, self.seq_length], self.visual_seq_length), torch.ones(self.batch_size, self.visual_seq_length, dtype=torch.long, device=torch_device) * -1, ), dim=-1, ) flickr_labels = None if self.use_labels: flickr_labels = floats_tensor( [self.batch_size, self.seq_length + self.visual_seq_length, self.visual_seq_length] ) config, input_dict = self.prepare_config_and_inputs_for_common() input_dict.update({"region_to_phrase_position": region_to_phrase_position, "labels": flickr_labels}) return config, input_dict def create_and_check_model(self, config, input_dict): model = VisualBertModel(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.seq_length + self.visual_seq_length, self.hidden_size), ) def create_and_check_for_pretraining(self, config, input_dict): model = VisualBertForPreTraining(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual( result.prediction_logits.shape, (self.batch_size, self.seq_length + self.visual_seq_length, self.vocab_size), ) def create_and_check_for_vqa(self, config, input_dict): model = VisualBertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice(self, config, input_dict): model = VisualBertForMultipleChoice(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_nlvr(self, config, input_dict): model = VisualBertForVisualReasoning(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_flickr(self, config, input_dict): model = VisualBertForRegionToPhraseAlignment(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.seq_length + self.visual_seq_length, self.visual_seq_length) ) @require_torch class VisualBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( VisualBertModel, VisualBertForMultipleChoice, VisualBertForVisualReasoning, VisualBertForRegionToPhraseAlignment, VisualBertForQuestionAnswering, VisualBertForPreTraining, ) if is_torch_available() else () ) pipeline_model_mapping = {"feature-extraction": VisualBertModel} if is_torch_available() else {} test_torchscript = False test_pruning = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if model_class == VisualBertForMultipleChoice: for key in inputs_dict.keys(): value = inputs_dict[key] if isinstance(value, torch.Tensor) and value.ndim > 1: if key != "visual_embeds": inputs_dict[key] = ( inputs_dict[key].unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous() ) else: inputs_dict[key] = ( inputs_dict[key] .unsqueeze(1) .expand(-1, self.model_tester.num_choices, -1, self.model_tester.visual_embedding_dim) .contiguous() ) elif model_class == VisualBertForRegionToPhraseAlignment: total_length = self.model_tester.seq_length + self.model_tester.visual_seq_length batch_size = self.model_tester.batch_size 
inputs_dict["region_to_phrase_position"] = torch.zeros( (batch_size, total_length), dtype=torch.long, device=torch_device, ) if return_labels: if model_class == VisualBertForMultipleChoice: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class == VisualBertForPreTraining: total_length = self.model_tester.seq_length + self.model_tester.visual_seq_length batch_size = self.model_tester.batch_size inputs_dict["labels"] = torch.zeros( (batch_size, total_length), dtype=torch.long, device=torch_device, ) inputs_dict["sentence_image_labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) # Flickr expects float labels elif model_class == VisualBertForRegionToPhraseAlignment: batch_size = self.model_tester.batch_size total_length = self.model_tester.seq_length + self.model_tester.visual_seq_length inputs_dict["labels"] = torch.ones( ( batch_size, total_length, self.model_tester.visual_seq_length, ), dtype=torch.float, device=torch_device, ) # VQA expects float labels elif model_class == VisualBertForQuestionAnswering: inputs_dict["labels"] = torch.ones( (self.model_tester.batch_size, self.model_tester.num_labels), dtype=torch.float, device=torch_device, ) elif model_class == VisualBertForVisualReasoning: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size), dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = VisualBertModelTester(self) self.config_tester = ConfigTester(self, config_class=VisualBertConfig, hidden_size=37) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) visual_seq_len = getattr(self.model_tester, "visual_seq_length", None) encoder_seq_length = (seq_len if seq_len is not None else 0) + ( visual_seq_len if visual_seq_len is not None else 0 ) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) # Check 
attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(self_attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1: seq_length = seq_length * self.model_tester.chunk_length else: seq_length = self.model_tester.seq_length + self.model_tester.visual_seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_model_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_pretraining() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_model_for_vqa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_vqa() self.model_tester.create_and_check_for_vqa(*config_and_inputs) def test_model_for_nlvr(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_nlvr() self.model_tester.create_and_check_for_nlvr(*config_and_inputs) def test_model_for_multiple_choice(self): config_and_inputs 
= self.model_tester.prepare_config_and_inputs_for_multiple_choice() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_model_for_flickr(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_flickr() self.model_tester.create_and_check_for_flickr(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "uclanlp/visualbert-vqa" model = VisualBertModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @require_torch class VisualBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_vqa_coco_pre(self): model = VisualBertForPreTraining.from_pretrained("uclanlp/visualbert-vqa-coco-pre") input_ids = torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.long).reshape(1, -1) token_type_ids = torch.tensor([0, 0, 0, 1, 1, 1], dtype=torch.long).reshape(1, -1) visual_embeds = torch.ones(size=(1, 10, 2048), dtype=torch.float32) * 0.5 visual_token_type_ids = torch.ones(size=(1, 10), dtype=torch.long) attention_mask = torch.tensor([1] * 6).reshape(1, -1) visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1) with torch.no_grad(): output = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, ) vocab_size = 30522 expected_shape = torch.Size((1, 16, vocab_size)) self.assertEqual(output.prediction_logits.shape, expected_shape) expected_slice = torch.tensor( [[[-5.1858, -5.1903, -4.9142], [-6.2214, -5.9238, -5.8381], [-6.3027, -5.9939, -5.9297]]] ) self.assertTrue(torch.allclose(output.prediction_logits[:, :3, :3], expected_slice, atol=1e-4)) expected_shape_2 = torch.Size((1, 2)) self.assertEqual(output.seq_relationship_logits.shape, expected_shape_2) expected_slice_2 = torch.tensor([[0.7393, 0.1754]]) self.assertTrue(torch.allclose(output.seq_relationship_logits, expected_slice_2, atol=1e-4)) @slow def test_inference_vqa(self): model = VisualBertForQuestionAnswering.from_pretrained("uclanlp/visualbert-vqa") input_ids = torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.long).reshape(1, -1) token_type_ids = torch.tensor([0, 0, 0, 1, 1, 1], dtype=torch.long).reshape(1, -1) visual_embeds = torch.ones(size=(1, 10, 2048), dtype=torch.float32) * 0.5 visual_token_type_ids = torch.ones(size=(1, 10), dtype=torch.long) attention_mask = torch.tensor([1] * 6).reshape(1, -1) visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1) with torch.no_grad(): output = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, ) # vocab_size = 30522 expected_shape = torch.Size((1, 3129)) self.assertEqual(output.logits.shape, expected_shape) expected_slice = torch.tensor( 
[[-8.9898, 3.0803, -1.8016, 2.4542, -8.3420, -2.0224, -3.3124, -4.4139, -3.1491, -3.8997]] ) self.assertTrue(torch.allclose(output.logits[:, :10], expected_slice, atol=1e-4)) @slow def test_inference_nlvr(self): model = VisualBertForVisualReasoning.from_pretrained("uclanlp/visualbert-nlvr2") input_ids = torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.long).reshape(1, -1) token_type_ids = torch.tensor([0, 0, 0, 1, 1, 1], dtype=torch.long).reshape(1, -1) visual_embeds = torch.ones(size=(1, 10, 1024), dtype=torch.float32) * 0.5 visual_token_type_ids = torch.ones(size=(1, 10), dtype=torch.long) attention_mask = torch.tensor([1] * 6).reshape(1, -1) visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1) with torch.no_grad(): output = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, ) # vocab_size = 30522 expected_shape = torch.Size((1, 2)) self.assertEqual(output.logits.shape, expected_shape) expected_slice = torch.tensor([[-1.1436, 0.8900]]) self.assertTrue(torch.allclose(output.logits, expected_slice, atol=1e-4)) @slow def test_inference_vcr(self): model = VisualBertForMultipleChoice.from_pretrained("uclanlp/visualbert-vcr") input_ids = torch.tensor([[[1, 2, 3, 4, 5, 6] for i in range(4)]], dtype=torch.long) attention_mask = torch.ones_like(input_ids) token_type_ids = torch.ones_like(input_ids) visual_embeds = torch.ones(size=(1, 4, 10, 512), dtype=torch.float32) * 0.5 visual_token_type_ids = torch.ones(size=(1, 4, 10), dtype=torch.long) visual_attention_mask = torch.ones_like(visual_token_type_ids) with torch.no_grad(): output = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, ) # vocab_size = 30522 expected_shape = torch.Size((1, 4)) self.assertEqual(output.logits.shape, expected_shape) expected_slice = torch.tensor([[-7.7697, -7.7697, -7.7697, -7.7697]]) self.assertTrue(torch.allclose(output.logits, expected_slice, atol=1e-4))
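# A minimal standalone sketch (not from the original test file) of the input contract the
# integration tests above rely on: VisualBERT pairs BERT token ids with precomputed region
# features ("visual_embeds"), and its hidden states span the concatenated text + visual
# sequence. The constant features below are placeholders for real detector outputs
# (e.g. Faster R-CNN region features of dimension 2048, as in the tests above).
import torch
from transformers import BertTokenizer, VisualBertModel

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = VisualBertModel.from_pretrained("uclanlp/visualbert-vqa-coco-pre").eval()

inputs = tokenizer("Who is eating the apple?", return_tensors="pt")
num_regions = 10
inputs.update(
    {
        "visual_embeds": torch.ones(1, num_regions, 2048) * 0.5,  # dummy region features
        "visual_token_type_ids": torch.ones(1, num_regions, dtype=torch.long),
        "visual_attention_mask": torch.ones(1, num_regions, dtype=torch.long),
    }
)
with torch.no_grad():
    outputs = model(**inputs)
# Sequence length == number of text tokens + number of visual regions.
print(outputs.last_hidden_state.shape)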
transformers/tests/models/visual_bert/test_modeling_visual_bert.py/0
{ "file_path": "transformers/tests/models/visual_bert/test_modeling_visual_bert.py", "repo_id": "transformers", "token_count": 13985 }
457
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch XCLIP model.""" import inspect import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import XCLIPModel, XCLIPTextModel, XCLIPVisionModel if is_vision_available(): from transformers import XCLIPProcessor class XCLIPVisionModelTester: def __init__( self, parent, batch_size=8, image_size=30, patch_size=2, num_channels=3, num_frames=8, # important; the batch size * time must be divisible by the number of frames is_training=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, mit_hidden_size=64, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_frames = num_frames self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.mit_hidden_size = mit_hidden_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor( [self.batch_size * self.num_frames, self.num_channels, self.image_size, self.image_size] ) config = self.get_config() return config, pixel_values def get_config(self): return XCLIPVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, mit_hidden_size=self.mit_hidden_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = XCLIPVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = 
(self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size * self.num_frames, num_patches + 1, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size * self.num_frames, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class XCLIPVisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as X-CLIP does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (XCLIPVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = XCLIPVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=XCLIPVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="X-CLIP does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip def test_training(self): pass @unittest.skip def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="XCLIPVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="XCLIPVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): model_name = "microsoft/xclip-base-patch32" model = XCLIPVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_gradient_checkpointing_backward_compatibility(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: 
continue print("Model class:", model_class) config.gradient_checkpointing = True model = model_class(config) self.assertTrue(model.is_gradient_checkpointing) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True # we add 1 here due to the special message token in X-CLIP's vision encoder seq_len = getattr(self.model_tester, "seq_length", None) + 1 encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(len(outputs.attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(len(outputs.attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(outputs.attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_seq_length], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_seq_length], ) @require_torch_multi_gpu def test_multi_gpu_data_parallel_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # some params shouldn't be scattered by nn.DataParallel # so just remove them if they are present. 
blacklist_non_batched_params = ["head_mask", "decoder_head_mask", "cross_attn_head_mask"] for k in blacklist_non_batched_params: inputs_dict.pop(k, None) # move input tensors to cuda:0 for k, v in inputs_dict.items(): if torch.is_tensor(v): inputs_dict[k] = v.to(0) for model_class in self.all_model_classes: model = model_class(config=config) model.to(0) model.eval() # Wrap model in nn.DataParallel model = nn.DataParallel(model) with torch.no_grad(): test = self._prepare_for_class(inputs_dict, model_class) for k, v in test.items(): if isinstance(v, torch.Tensor): print(k, v.shape) else: print(k, v) _ = model(**self._prepare_for_class(inputs_dict, model_class)) class XCLIPTextModelTester: def __init__( self, parent, batch_size=8, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return XCLIPTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = XCLIPTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class XCLIPTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (XCLIPTextModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False def setUp(self): self.model_tester = XCLIPTextModelTester(self) self.config_tester = ConfigTester(self, 
config_class=XCLIPTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip def test_training(self): pass @unittest.skip def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="X-CLIP does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="XCLIPTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="XCLIPTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): model_name = "microsoft/xclip-base-patch32" model = XCLIPTextModel.from_pretrained(model_name) self.assertIsNotNone(model) class XCLIPModelTester: def __init__( self, parent, text_kwargs=None, vision_kwargs=None, projection_dim=64, mit_hidden_size=64, is_training=True, ): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.projection_dim = projection_dim self.mit_hidden_size = mit_hidden_size self.text_model_tester = XCLIPTextModelTester(parent, **text_kwargs) self.vision_model_tester = XCLIPVisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, _ = self.vision_model_tester.prepare_config_and_inputs() pixel_values = floats_tensor( [ self.vision_model_tester.batch_size, self.vision_model_tester.num_frames, self.vision_model_tester.num_channels, self.vision_model_tester.image_size, self.vision_model_tester.image_size, ] ) config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return XCLIPConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=self.projection_dim, ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = XCLIPModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_video.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size), ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": True, } return config, inputs_dict @require_torch class XCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, 
unittest.TestCase): all_model_classes = (XCLIPModel,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": XCLIPModel} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_torchscript = False maxdiff = None def setUp(self): self.model_tester = XCLIPModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="XCLIPModel does not have input/output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="XCLIPModel does not support feedforward chunking") def test_feed_forward_chunking(self): pass # override as the `logit_scale`, `prompts_generator.alpha` parameters require special treatment def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initialized as per the original implementation if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) elif name == "prompts_generator.alpha": self.assertAlmostEqual(param.data.mean().item(), model.config.prompt_alpha) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to False") configs_no_init = _config_zero_init(config) # To be sure we have no NaN configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # X-CLIP needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } 
self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save XCLIPConfig and check if we can load XCLIPVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = XCLIPVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save XCLIPConfig and check if we can load XCLIPTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = XCLIPTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): model_name = "microsoft/xclip-base-patch32" model = XCLIPModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on a spaghetti video def prepare_video(): file = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti_8_frames.npy", repo_type="dataset" ) video = np.load(file) return list(video) @require_vision @require_torch class XCLIPModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "microsoft/xclip-base-patch32" model = XCLIPModel.from_pretrained(model_name).to(torch_device) processor = XCLIPProcessor.from_pretrained(model_name) video = prepare_video() inputs = processor( text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True ).to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits self.assertEqual( outputs.logits_per_video.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = torch.tensor([[14.0181, 20.2771, 14.4776]], device=torch_device) self.assertTrue(torch.allclose(outputs.logits_per_video, expected_logits, atol=1e-3))
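# A minimal standalone sketch (not from the original test file) of the video-text matching
# flow checked by the integration test above: X-CLIP scores one 8-frame clip against
# candidate captions, and softmaxing logits_per_video yields per-caption probabilities.
# The random frames are placeholders for a real video such as the spaghetti clip above.
import numpy as np
import torch
from transformers import XCLIPModel, XCLIPProcessor

model_name = "microsoft/xclip-base-patch32"
model = XCLIPModel.from_pretrained(model_name).eval()
processor = XCLIPProcessor.from_pretrained(model_name)

video = list(np.random.randint(0, 256, (8, 224, 224, 3), dtype=np.uint8))  # 8 dummy frames
inputs = processor(
    text=["playing sports", "eating spaghetti", "go shopping"],
    videos=video,
    return_tensors="pt",
    padding=True,
)
with torch.no_grad():
    probs = model(**inputs).logits_per_video.softmax(dim=-1)
print(probs)  # shape (1, 3): one probability per candidate caption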
transformers/tests/models/x_clip/test_modeling_x_clip.py/0
{ "file_path": "transformers/tests/models/x_clip/test_modeling_x_clip.py", "repo_id": "transformers", "token_count": 12611 }
458
# coding=utf-8 # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from transformers import XLMRobertaXLConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_sdpa, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, ) from transformers.models.xlm_roberta_xl.modeling_xlm_roberta_xl import ( XLMRobertaXLEmbeddings, create_position_ids_from_input_ids, ) class XLMRobertaXLModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, 
sequence_labels, token_labels, choice_labels def get_config(self): return XLMRobertaXLConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = XLMRobertaXLModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = XLMRobertaXLModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = XLMRobertaXLForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = XLMRobertaXLForCausalLM(config=config).to(torch_device).eval() # make sure that ids don't start with pad token mask = input_ids.ne(config.pad_token_id).long() input_ids = input_ids * mask # first forward pass outputs = model( input_ids, attention_mask=input_mask, 
encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next tokens and extend to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) # make sure that ids don't start with pad token mask = next_tokens.ne(config.pad_token_id).long() next_tokens = next_tokens * mask next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next attention mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = XLMRobertaXLForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = XLMRobertaXLForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = XLMRobertaXLForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = XLMRobertaXLForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( 
input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class XLMRobertaXLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLModel, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, ) if is_torch_available() else () ) all_generative_model_classes = (XLMRobertaXLForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": XLMRobertaXLModel, "fill-mask": XLMRobertaXLForMaskedLM, "question-answering": XLMRobertaXLForQuestionAnswering, "text-classification": XLMRobertaXLForSequenceClassification, "text-generation": XLMRobertaXLForCausalLM, "token-classification": XLMRobertaXLForTokenClassification, "zero-shot": XLMRobertaXLForSequenceClassification, } if is_torch_available() else {} ) model_split_percents = [0.5, 0.85, 0.95] # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"): return True return False def setUp(self): self.model_tester = XLMRobertaXLModelTester(self) self.config_tester = ConfigTester(self, config_class=XLMRobertaXLConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() 
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_decoder_model_past_with_large_inputs_relative_pos_emb(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() config_and_inputs[0].position_embedding_type = "relative_key" self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_create_position_ids_respects_padding_index(self): """This is a regression test for https://github.com/huggingface/transformers/issues/1761 The position ids should be masked with the embedding object's padding index. Therefore, the first available non-padding position index is XLMRobertaXLEmbeddings.padding_idx + 1 """ config = self.model_tester.prepare_config_and_inputs()[0] model = XLMRobertaXLEmbeddings(config=config) input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]]) expected_positions = torch.as_tensor( [[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]] ) position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx) self.assertEqual(position_ids.shape, expected_positions.shape) self.assertTrue(torch.all(torch.eq(position_ids, expected_positions))) def test_create_position_ids_from_inputs_embeds(self): """This is a regression test for https://github.com/huggingface/transformers/issues/1761 The position ids should be masked with the embedding object's padding index. Therefore, the first available non-padding position index is XLMRobertaXLEmbeddings.padding_idx + 1 """ config = self.model_tester.prepare_config_and_inputs()[0] embeddings = XLMRobertaXLEmbeddings(config=config) inputs_embeds = torch.empty(2, 4, 30) expected_single_positions = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions]) position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds) self.assertEqual(position_ids.shape, expected_positions.shape) self.assertTrue(torch.all(torch.eq(position_ids, expected_positions))) # TODO: Remove this and use the parent method (in common tests) once XLM RoBERTa XL supports low_cpu_mem_usage=True. 
@require_torch_sdpa @slow # Copied from tests.test_modeling_common.ModelTesterMixin.test_eager_matches_sdpa_generate def test_eager_matches_sdpa_generate(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") max_new_tokens = 30 if len(self.all_generative_model_classes) == 0: self.skipTest(f"{self.__class__.__name__} tests a model that does support generate: skipping this test") for model_class in self.all_generative_model_classes: if not model_class._supports_sdpa: self.skipTest(f"{model_class.__name__} does not support SDPA") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() dummy_input = inputs_dict[model_class.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input = dummy_input.to(torch.float16) # make sure that all models have enough positions for generation if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1 model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) # Ignore copy model_sdpa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, low_cpu_mem_usage=False, ).to(torch_device) self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") # Ignore copy model_eager = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, low_cpu_mem_usage=False, attn_implementation="eager", ).to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: raise ValueError("The eager model should not have SDPA attention layers") has_sdpa = False for name, submodule in model_sdpa.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: has_sdpa = True break if not has_sdpa: raise ValueError("The SDPA model should have SDPA attention layers") # Just test that a large cache works as expected res_eager = model_eager.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False ) res_sdpa = model_sdpa.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False ) self.assertTrue(torch.allclose(res_eager, res_sdpa)) @require_torch class XLMRobertaModelXLIntegrationTest(unittest.TestCase): @slow def test_xlm_roberta_xl(self): model = XLMRobertaXLModel.from_pretrained("facebook/xlm-roberta-xl").to(torch_device) input_ids = torch.tensor( [[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]], device=torch_device ) # The dog is cute and lives in the garden house expected_output_shape = torch.Size((1, 12, 2560)) # batch_size, sequence_length, embedding_vector_dim expected_output_values_last_dim = torch.tensor( [[0.0110, 0.0605, 0.0354, 0.0689, 0.0066, 0.0691, 0.0302, 0.0412, 0.0860, 0.0036, 0.0405, 0.0170]], device=torch_device, ) output = model(input_ids)["last_hidden_state"].detach() self.assertEqual(output.shape, expected_output_shape) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3)) @unittest.skip(reason="Model is too large to be tested on the CI") def test_xlm_roberta_xxl(self): model = 
XLMRobertaXLModel.from_pretrained("facebook/xlm-roberta-xxl").to(torch_device) input_ids = torch.tensor( [[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]], device=torch_device ) # The dog is cute and lives in the garden house expected_output_shape = torch.Size((1, 12, 4096)) # batch_size, sequence_length, embedding_vector_dim expected_output_values_last_dim = torch.tensor( [[0.0046, 0.0146, 0.0227, 0.0126, 0.0219, 0.0175, -0.0101, 0.0006, 0.0124, 0.0209, -0.0063, 0.0096]], device=torch_device, ) output = model(input_ids)["last_hidden_state"].detach() self.assertEqual(output.shape, expected_output_shape) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
transformers/tests/models/xlm_roberta_xl/test_modeling_xlm_roberta_xl.py/0
{ "file_path": "transformers/tests/models/xlm_roberta_xl/test_modeling_xlm_roberta_xl.py", "repo_id": "transformers", "token_count": 12097 }
459
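The create_position_ids regression tests above assert that non-padding tokens are numbered starting from padding_idx + 1 while padding positions keep padding_idx. A minimal standalone sketch of that behaviour (not the library implementation itself, and assuming padding_idx == 1) reproduces the expected tensors:

import torch

def sketch_create_position_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    # 1 for real tokens, 0 for padding
    mask = input_ids.ne(padding_idx).int()
    # cumulative count over real tokens; padding positions are zeroed out again
    incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
    # shift so the first real position is padding_idx + 1; padding stays at padding_idx
    return incremental_indices.long() + padding_idx

input_ids = torch.tensor([[12, 31, 13, 1]])  # last token is the (assumed) padding index
print(sketch_create_position_ids(input_ids, padding_idx=1))  # tensor([[2, 3, 4, 1]])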
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, get_wsd_schedule, ) def unwrap_schedule(scheduler, num_steps=10): lrs = [] for _ in range(num_steps): lrs.append(scheduler.get_lr()[0]) scheduler.step() return lrs def unwrap_and_save_reload_schedule(scheduler, num_steps=10): lrs = [] for step in range(num_steps): lrs.append(scheduler.get_lr()[0]) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: file_name = os.path.join(tmpdirname, "schedule.bin") torch.save(scheduler.state_dict(), file_name) state_dict = torch.load(file_name) scheduler.load_state_dict(state_dict) return lrs @require_torch class OptimizationTest(unittest.TestCase): def assertListAlmostEqual(self, list1, list2, tol): self.assertEqual(len(list1), len(list2)) for a, b in zip(list1, list2): self.assertAlmostEqual(a, b, delta=tol) def test_adam_w(self): w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True) target = torch.tensor([0.4, 0.2, -0.5]) criterion = nn.MSELoss() # No warmup, constant schedule, no gradient clipping optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0) for _ in range(100): loss = criterion(w, target) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2) def test_adafactor(self): w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True) target = torch.tensor([0.4, 0.2, -0.5]) criterion = nn.MSELoss() # No warmup, constant schedule, no gradient clipping optimizer = Adafactor( params=[w], lr=1e-2, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, ) for _ in range(1000): loss = criterion(w, target) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2) @require_torch class ScheduleInitTest(unittest.TestCase): m = nn.Linear(50, 50) if is_torch_available() else None optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None num_steps = 10 def assertListAlmostEqual(self, list1, list2, tol, msg=None): self.assertEqual(len(list1), len(list2)) for a, b in zip(list1, list2): self.assertAlmostEqual(a, b, delta=tol, msg=msg) def test_schedulers(self): common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) scheds = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), get_wsd_schedule: ( {"num_warmup_steps": 2, "num_stable_steps": 2, "num_decay_steps": 3, "min_lr_ratio": 0.1}, [0.0, 5.0, 10.0, 10.0, 10.0, 7.75, 3.25, 1.0, 1.0, 1.0], ), } for scheduler_func, data in scheds.items(): kwargs, expected_learning_rates = data scheduler = scheduler_func(self.optimizer, **kwargs) self.assertEqual(len([scheduler.get_lr()[0]]), 1) lrs_1 = unwrap_schedule(scheduler, self.num_steps) self.assertListAlmostEqual( lrs_1, expected_learning_rates, tol=1e-2, msg=f"failed for {scheduler_func} in normal scheduler", ) scheduler = scheduler_func(self.optimizer, **kwargs) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(scheduler) # wrap to test picklability of the schedule lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps) self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload") def test_get_scheduler(self): test_params = [ { "name": "warmup_stable_decay", "optimizer": self.optimizer, "num_warmup_steps": 2, "scheduler_specific_kwargs": {"num_stable_steps": 1, "num_decay_steps": 3}, }, { "name": "warmup_stable_decay", "optimizer": self.optimizer, "num_warmup_steps": 2, "num_training_steps": 10, "scheduler_specific_kwargs": {"num_stable_steps": 1, "num_decay_steps": 3}, }, {"name": "cosine", "optimizer": self.optimizer, "num_warmup_steps": 2, "num_training_steps": 10}, ] for param in test_params: self.assertTrue(get_scheduler(**param), msg=f"failed for {param['name']} in get_scheduler") class LambdaScheduleWrapper: """See https://github.com/huggingface/transformers/issues/21689""" def __init__(self, fn): self.fn = fn def __call__(self, *args, **kwargs): return self.fn(*args, **kwargs) @classmethod def wrap_scheduler(cls, scheduler): scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
transformers/tests/optimization/test_optimization.py/0
{ "file_path": "transformers/tests/optimization/test_optimization.py", "repo_id": "transformers", "token_count": 4041 }
460
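As a usage sketch of the schedules exercised by ScheduleInitTest (same optimizer, base learning rate, and step counts as test_schedulers; nothing here goes beyond what the test already encodes), the warmup-then-linear-decay schedule yields exactly the trace the test asserts:

from torch import nn
from transformers import AdamW, get_linear_schedule_with_warmup

model = nn.Linear(50, 50)
optimizer = AdamW(model.parameters(), lr=10.0)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)

lrs = []
for _ in range(10):
    lrs.append(scheduler.get_lr()[0])  # same pattern as the unwrap_schedule helper above
    scheduler.step()

# Expected per test_schedulers:
# [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]
print(lrs)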
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from typing import Dict import numpy as np from huggingface_hub.utils import insecure_hashlib from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass def hashimage(image: Image) -> str: m = insecure_hashlib.md5(image.tobytes()) return m.hexdigest()[:10] def mask_to_test_readable(mask: Image) -> Dict: npimg = np.array(mask) shape = npimg.shape return {"hash": hashimage(mask), "shape": shape} @is_pipeline_test @require_vision @require_torch class MaskGenerationPipelineTests(unittest.TestCase): model_mapping = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) tf_model_mapping = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def get_test_pipeline(self, model, tokenizer, processor, torch_dtype="float32"): image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor, torch_dtype=torch_dtype) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] @unittest.skip(reason="TODO @Arthur: Implement me") def run_pipeline_test(self, mask_generator, examples): pass @require_tf @unittest.skip(reason="Image segmentation not implemented in TF") def test_small_model_tf(self): pass @slow @require_torch def test_small_model_pt(self): image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge") outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256) # Shortening by hashing new_outupt = [] for i, o in enumerate(outputs["masks"]): new_outupt += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(new_outupt, decimals=4), [ {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444}, {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021}, {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167}, {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132}, {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053}, {'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9967}, {'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993}, {'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9909}, {'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9879}, {'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9834}, {'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 
0.9716}, {'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9612}, {'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9599}, {'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9552}, {'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9532}, {'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9516}, {'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9499}, {'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9483}, {'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9464}, {'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943}, {'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943}, {'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9408}, {'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9335}, {'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9326}, {'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9262}, {'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8999}, {'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8986}, {'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8984}, {'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8873}, {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8871} ], ) # fmt: on @require_torch @slow def test_threshold(self): model_id = "facebook/sam-vit-huge" image_segmenter = pipeline("mask-generation", model=model_id) outputs = image_segmenter( "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256 ) # Shortening by hashing new_outupt = [] for i, o in enumerate(outputs["masks"]): new_outupt += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(new_outupt, decimals=4), [ {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444}, {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210}, {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167}, {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132}, {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053}, ], )
transformers/tests/pipelines/test_pipelines_mask_generation.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_mask_generation.py", "repo_id": "transformers", "token_count": 3341 }
461
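A usage sketch of the mask-generation pipeline covered above; the checkpoint, image URL, and points_per_batch value come from test_small_model_pt, and the loop mirrors the test's mask_to_test_readable helper:

import numpy as np
from transformers import pipeline

generator = pipeline("mask-generation", model="facebook/sam-vit-huge")
outputs = generator("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

# The pipeline returns parallel "masks" and "scores" lists.
for mask, score in zip(outputs["masks"], outputs["scores"]):
    print(np.array(mask).shape, round(float(score), 4))  # e.g. (480, 640) 1.0444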
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision @require_torch class ZeroShotObjectDetectionPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def get_test_pipeline(self, model, tokenizer, processor, torch_dtype="float32"): object_detector = pipeline( "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection", torch_dtype=torch_dtype, ) examples = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def run_pipeline_test(self, object_detector, examples): outputs = object_detector(examples[0], threshold=0.0) n = len(outputs) self.assertGreater(n, 0) self.assertEqual( outputs, [ { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, } for i in range(n) ], ) @require_tf @unittest.skip(reason="Zero Shot Object Detection not implemented in TF") def test_small_model_tf(self): pass @require_torch def test_small_model_pt(self): object_detector = pipeline( "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection" ) outputs = object_detector( "./tests/fixtures/tests_samples/COCO/000000039769.png", candidate_labels=["cat", "remote", "couch"], threshold=0.64, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, ], ) outputs = object_detector( [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ], threshold=0.64, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.7235, "label": "cat", "box": {"xmin": 204, 
"ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, ] ], ) @require_torch @slow def test_large_model_pt(self): object_detector = pipeline("zero-shot-object-detection") outputs = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], ) outputs = object_detector( [ { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, ], ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], ], ) @require_tf @unittest.skip(reason="Zero Shot Object Detection not implemented in TF") def test_large_model_tf(self): pass @require_torch @slow def test_threshold(self): threshold = 0.2 object_detector = pipeline("zero-shot-object-detection") outputs = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], threshold=threshold, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, 
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, ], ) @require_torch @slow def test_top_k(self): top_k = 2 object_detector = pipeline("zero-shot-object-detection") outputs = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], top_k=top_k, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, ], )
transformers/tests/pipelines/test_pipelines_zero_shot_object_detection.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_zero_shot_object_detection.py", "repo_id": "transformers", "token_count": 5112 }
462
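A usage sketch of the zero-shot object detection pipeline tested above; the image URL, candidate labels, and threshold mirror test_threshold, and the printed fields follow the result schema the assertions check:

from transformers import pipeline

detector = pipeline("zero-shot-object-detection")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote", "couch"],
    threshold=0.2,
)
for pred in predictions:
    box = pred["box"]
    print(f'{pred["label"]}: {pred["score"]:.4f} box=({box["xmin"]}, {box["ymin"]}, {box["xmax"]}, {box["ymax"]})')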
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import pytest from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig from transformers.testing_utils import ( is_torch_available, require_accelerate, require_auto_gptq, require_optimum, require_torch_gpu, require_torch_multi_gpu, slow, ) if is_torch_available(): import torch class GPTQConfigTest(unittest.TestCase): def test_bits(self): with self.assertRaises(ValueError): GPTQConfig(bits="") GPTQConfig(bits=1) GPTQConfig(bits=2) GPTQConfig(bits=4) def test_dataset(self): with self.assertRaises(ValueError): GPTQConfig(bits=2, dataset="auto_gpt") GPTQConfig(bits=2, dataset="c4") def test_damp_percent(self): with self.assertRaises(ValueError): GPTQConfig(bits=2, damp_percent=10) GPTQConfig(bits=2, damp_percent=-1) GPTQConfig(bits=2, damp_percent="0") GPTQConfig(bits=2, damp_percent=0.01) def test_to_dict(self): quantization_config = GPTQConfig(bits=2) quantization_config.to_dict() def test_from_dict(self): dict = {"bits": 2} quantization_config = GPTQConfig.from_dict(dict) self.assertEqual(dict["bits"], quantization_config.bits) @require_optimum def test_optimum_config(self): from optimum.gptq import GPTQQuantizer config = GPTQConfig(bits=2) optimum_config = GPTQQuantizer.from_dict(config.to_dict_optimum()) self.assertEqual(optimum_config.bits, config.bits) new_config = GPTQConfig.from_dict_optimum(optimum_config.to_dict()) self.assertEqual(optimum_config.bits, new_config.bits) @slow @require_optimum @require_auto_gptq @require_torch_gpu class GPTQTest(unittest.TestCase): model_name = "bigscience/bloom-560m" input_text = "Hello my name is" EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I") EXPECTED_OUTPUTS.add("Hello my name is John, I am a professional photographer and I") EXPECTED_OUTPUTS.add("Hello my name is John, I am a student in the University of") EXPECTED_OUTPUTS.add("Hello my name is John and I am a very good looking man.") EXPECTED_OUTPUTS.add("Hello my name is Alyson, I am a student in the") EXPECTED_OUTPUTS.add("Hello my name is Alyson and I am a very sweet,") # this seems a little small considering that we are doing 4bit quant but we have a small model and ww don't quantize the embeddings EXPECTED_RELATIVE_DIFFERENCE = 1.664253062 bits = 4 group_size = 128 desc_act = False use_exllama = False dataset = [ "auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm." 
] device_map = None # called only once for all test in this class @classmethod def setUpClass(cls): """ Setup quantized model """ cls.model_fp16 = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map=cls.device_map ) cls.mem_fp16 = cls.model_fp16.get_memory_footprint() cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) quantization_config = GPTQConfig( bits=cls.bits, dataset=cls.dataset, tokenizer=cls.tokenizer, group_size=cls.group_size, desc_act=cls.desc_act, use_exllama=cls.use_exllama, ) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map=cls.device_map, quantization_config=quantization_config, ) def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model """ mem_quantized = self.quantized_model.get_memory_footprint() self.assertAlmostEqual(self.mem_fp16 / mem_quantized, self.EXPECTED_RELATIVE_DIFFERENCE) def test_device_and_dtype_assignment(self): r""" Test whether trying to cast (or assigning a device to) a model after quantization will throw an error. Checks also if other models are casted correctly. """ # This should work if self.device_map is None: _ = self.quantized_model.to(0) with self.assertRaises(ValueError): # Tries with a `dtype`` self.quantized_model.to(torch.float16) def test_original_dtype(self): r""" A simple test to check if the model succesfully stores the original dtype """ self.assertTrue(hasattr(self.quantized_model.config, "_pre_quantization_dtype")) self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype")) self.assertTrue(self.quantized_model.config._pre_quantization_dtype == torch.float16) def test_quantized_layers_class(self): """ Simple test to check if the model conversion has been done correctly by checking on the class type of the linear layers of the converted models """ from auto_gptq.utils.import_utils import dynamically_import_QuantLinear QuantLinear = dynamically_import_QuantLinear( use_triton=False, desc_act=self.desc_act, group_size=self.group_size, bits=self.bits, disable_exllama=not self.use_exllama, disable_exllamav2=True, ) self.assertTrue(self.quantized_model.transformer.h[0].mlp.dense_4h_to_h.__class__ == QuantLinear) def check_inference_correctness(self, model): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. 
""" # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def check_quantized_layers_type(self, model, value): self.assertTrue(model.transformer.h[0].mlp.dense_4h_to_h.QUANT_TYPE == value) def test_generate_quality(self): """ Simple test to check the quality of the model by comparing the generated tokens with the expected tokens """ if self.device_map is None: self.check_inference_correctness(self.quantized_model.to(0)) else: self.check_inference_correctness(self.quantized_model) def test_serialization(self): """ Test the serialization of the model and the loading of the quantized weights works """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) if not self.use_exllama: quantized_model_from_saved = AutoModelForCausalLM.from_pretrained( tmpdirname, quantization_config=GPTQConfig(use_exllama=False, bits=4) ).to(0) self.check_quantized_layers_type(quantized_model_from_saved, "cuda-old") else: # we need to put it directly to the gpu. Otherwise, we won't be able to initialize the exllama kernel quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map={"": 0}) self.check_quantized_layers_type(quantized_model_from_saved, "exllama") self.check_inference_correctness(quantized_model_from_saved) @require_accelerate def test_serialization_big_model_inference(self): """ Test the serialization of the model and the loading of the quantized weights with big model inference """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map="auto") self.check_inference_correctness(quantized_model_from_saved) def test_change_loading_attributes(self): """ Test the serialization of the model and the loading of the quantized weights works with another config file """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) if not self.use_exllama: self.check_quantized_layers_type(self.quantized_model, "cuda-old") # we need to put it directly to the gpu. Otherwise, we won't be able to initialize the exllama kernel quantized_model_from_saved = AutoModelForCausalLM.from_pretrained( tmpdirname, quantization_config=GPTQConfig(use_exllama=True, bits=4), device_map={"": 0} ) self.assertEqual(quantized_model_from_saved.config.quantization_config.bits, self.bits) self.check_quantized_layers_type(quantized_model_from_saved, "exllama") self.check_inference_correctness(quantized_model_from_saved) @require_accelerate @require_torch_multi_gpu class GPTQTestDeviceMap(GPTQTest): device_map = "auto" @require_accelerate @require_torch_multi_gpu class GPTQTestDeviceMapExllama(GPTQTest): device_map = "auto" use_exllama = True @slow @require_optimum @require_auto_gptq @require_torch_gpu @require_accelerate class GPTQTestActOrderExllama(unittest.TestCase): """ Test GPTQ model with exllama kernel and desc_act=True (also known as act-order). More information on those arguments here: https://huggingface.co/docs/transformers/main_classes/quantization#transformers.GPTQConfig """ EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello, how are you ? 
I'm doing good, thanks for asking.") # 4bit + act_order + 128g model_name = "hf-internal-testing/TinyLlama-1.1B-Chat-v0.3-GPTQ" input_text = "Hello, how are you ?" @classmethod def setUpClass(cls): """ Setup quantized model """ cls.quantization_config = GPTQConfig(bits=4, max_input_length=4028) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map={"": 0}, quantization_config=cls.quantization_config, ) cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) def check_inference_correctness(self, model): """ Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_quantized_layers_type(self): self.assertTrue(self.quantized_model.model.layers[0].self_attn.k_proj.QUANT_TYPE == "exllama") def test_generate_quality(self): """ Simple test to check the quality of the model by comparing the generated tokens with the expected tokens """ self.check_inference_correctness(self.quantized_model) def test_max_input_length(self): """ Test if the max_input_length works. It modifies the maximum input length that of the model that runs with exllama backend. """ prompt = "I am in Paris and" * 1000 inp = self.tokenizer(prompt, return_tensors="pt").to(0) self.assertTrue(inp["input_ids"].shape[1] > 4028) with self.assertRaises(RuntimeError) as cm: self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3) self.assertTrue("temp_state buffer is too small" in str(cm.exception)) prompt = "I am in Paris and" inp = self.tokenizer(prompt, return_tensors="pt").to(0) self.assertTrue(inp["input_ids"].shape[1] < 4028) self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3) @slow @require_optimum @require_auto_gptq @require_torch_gpu @require_accelerate class GPTQTestExllamaV2(unittest.TestCase): """ Test GPTQ model with exllamav2 kernel and desc_act=True (also known as act-order). More information on those arguments here: https://huggingface.co/docs/transformers/main_classes/quantization#transformers.GPTQConfig """ EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello, how are you ? I'm doing good, thanks for asking.") # 4bit + act_order + 128g model_name = "hf-internal-testing/TinyLlama-1.1B-Chat-v0.3-GPTQ" input_text = "Hello, how are you ?" 
@classmethod def setUpClass(cls): """ Setup quantized model """ cls.quantization_config = GPTQConfig(bits=4, exllama_config={"version": 2}) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map={"": 0}, quantization_config=cls.quantization_config, ) cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) def test_quantized_layers_type(self): self.assertTrue(self.quantized_model.model.layers[0].self_attn.k_proj.QUANT_TYPE == "exllamav2") def check_inference_correctness(self, model): """ Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality(self): """ Simple test to check the quality of the model by comapring the the generated tokens with the expected tokens """ self.check_inference_correctness(self.quantized_model) # fail when run all together @pytest.mark.skip @require_accelerate @require_torch_multi_gpu class GPTQTestDeviceMapCPUOffload(GPTQTest): device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "lm_head": 0, "transformer.h.0": 0, "transformer.h.1": 0, "transformer.h.2": 0, "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 0, "transformer.h.10": 1, "transformer.h.11": 1, "transformer.h.12": 1, "transformer.h.13": 1, "transformer.h.14": 1, "transformer.h.15": 1, "transformer.h.16": 1, "transformer.h.17": 0, "transformer.h.18": "cpu", "transformer.h.19": "cpu", "transformer.h.20": "cpu", "transformer.h.21": "cpu", "transformer.h.22": "cpu", "transformer.h.23": 1, "transformer.ln_f": 0, }
transformers/tests/quantization/gptq/test_gptq.py/0
{ "file_path": "transformers/tests/quantization/gptq/test_gptq.py", "repo_id": "transformers", "token_count": 7206 }
463
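A sketch of the quantization path that GPTQTest.setUpClass exercises: build a 4-bit GPTQConfig, quantize a small checkpoint while loading it, then save and reload the result. The "c4" calibration dataset string appears in GPTQConfigTest.test_dataset; the output directory name is an illustrative choice, not a value from the tests:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

model_id = "bigscience/bloom-560m"
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
quantization_config = GPTQConfig(bits=4, group_size=128, dataset="c4", tokenizer=tokenizer)

# Quantization happens during from_pretrained when a GPTQConfig is passed.
quantized = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    device_map="auto",
    quantization_config=quantization_config,
)

quantized.save_pretrained("bloom-560m-gptq")  # hypothetical output directory
reloaded = AutoModelForCausalLM.from_pretrained("bloom-560m-gptq", device_map="auto")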
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Finetuning the library models for sequence classification on GLUE.""" # You can also adapt this script on your own text classification task. Pointers for this are left as comments. import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import numpy as np from datasets import load_dataset, load_metric import transformers from transformers import ( # Trainer,; TrainingArguments, AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, PretrainedConfig, default_data_collator, set_seed, ) # Will import SageMaker Model parallelism specific Trainer from transformers.sagemaker import SageMakerTrainer as Trainer from transformers.sagemaker import SageMakerTrainingArguments as TrainingArguments from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.4.2") task_to_keys = { "cola": ("sentence", None), "mnli": ("premise", "hypothesis"), "mrpc": ("sentence1", "sentence2"), "qnli": ("question", "sentence"), "qqp": ("question1", "question2"), "rte": ("sentence1", "sentence2"), "sst2": ("sentence", None), "stsb": ("sentence1", "sentence2"), "wnli": ("sentence1", "sentence2"), } logger = logging.getLogger(__name__) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ task_name: Optional[str] = field( default=None, metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())}, ) max_seq_length: int = field( default=128, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) pad_to_max_length: bool = field( default=True, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_val_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of validation examples to this " "value if set." 
) }, ) max_test_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of test examples to this " "value if set." ) }, ) train_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the training data."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the validation data."} ) test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."}) def __post_init__(self): if self.task_name is not None: self.task_name = self.task_name.lower() if self.task_name not in task_to_keys.keys(): raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys())) elif self.train_file is None or self.validation_file is None: raise ValueError("Need either a GLUE task or a training/validation file.") else: train_extension = self.train_file.split(".")[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." validation_extension = self.validation_file.split(".")[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." 
) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if training_args.should_log else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if training_args.should_log: transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named # label if at least two columns are provided. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.task_name is not None: # Downloading and loading a dataset from the hub. datasets = load_dataset("nyu-mll/glue", data_args.task_name) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. data_files = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: train_extension = data_args.train_file.split(".")[-1] test_extension = data_args.test_file.split(".")[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." data_files["test"] = data_args.test_file else: raise ValueError("Need either a GLUE task or a test file for `do_predict`.") for key in data_files.keys(): logger.info(f"load a local file for {key}: {data_files[key]}") if data_args.train_file.endswith(".csv"): # Loading a dataset from local csv files datasets = load_dataset("csv", data_files=data_files) else: # Loading a dataset from local json files datasets = load_dataset("json", data_files=data_files) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets. 
# Labels if data_args.task_name is not None: is_regression = data_args.task_name == "stsb" if not is_regression: label_list = datasets["train"].features["label"].names num_labels = len(label_list) else: num_labels = 1 else: # Trying to have good defaults here, don't hesitate to tweak to your needs. is_regression = datasets["train"].features["label"].dtype in ["float32", "float64"] if is_regression: num_labels = 1 else: # A useful fast method: # https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.unique label_list = datasets["train"].unique("label") label_list.sort() # Let's sort it for determinism num_labels = len(label_list) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) model = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) # Preprocessing the datasets if data_args.task_name is not None: sentence1_key, sentence2_key = task_to_keys[data_args.task_name] else: # Again, we try to have some nice defaults but don't hesitate to tweak to your use case. non_label_column_names = [name for name in datasets["train"].column_names if name != "label"] if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names: sentence1_key, sentence2_key = "sentence1", "sentence2" else: if len(non_label_column_names) >= 2: sentence1_key, sentence2_key = non_label_column_names[:2] else: sentence1_key, sentence2_key = non_label_column_names[0], None # Padding strategy if data_args.pad_to_max_length: padding = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch padding = False # Some models have set the order of the labels to use, so let's make sure we do use it. label_to_id = None if ( model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id and data_args.task_name is not None and not is_regression ): # Some have all caps in their config, some don't. label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()} if sorted(label_name_to_id.keys()) == sorted(label_list): label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)} else: logger.warning( "Your model seems to have been trained with labels, but they don't match the dataset: " f"model labels: {sorted(label_name_to_id.keys())}, dataset labels: {sorted(label_list)}." 
"\nIgnoring the model labels as a result.", ) elif data_args.task_name is None and not is_regression: label_to_id = {v: i for i, v in enumerate(label_list)} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the " f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) def preprocess_function(examples): # Tokenize the texts args = ( (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) ) result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True) # Map labels to IDs (not necessary for GLUE tasks) if label_to_id is not None and "label" in examples: result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]] return result datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache) if training_args.do_train: if "train" not in datasets: raise ValueError("--do_train requires a train dataset") train_dataset = datasets["train"] if data_args.max_train_samples is not None: train_dataset = train_dataset.select(range(data_args.max_train_samples)) if training_args.do_eval: if "validation" not in datasets and "validation_matched" not in datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = datasets["validation_matched" if data_args.task_name == "mnli" else "validation"] if data_args.max_val_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_val_samples)) if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None: if "test" not in datasets and "test_matched" not in datasets: raise ValueError("--do_predict requires a test dataset") test_dataset = datasets["test_matched" if data_args.task_name == "mnli" else "test"] if data_args.max_test_samples is not None: test_dataset = test_dataset.select(range(data_args.max_test_samples)) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # Get the metric function if data_args.task_name is not None: metric = load_metric("glue", data_args.task_name) # TODO: When datasets metrics include regular accuracy, make an else here and remove special branch from # compute_metrics # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(p: EvalPrediction): preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1) if data_args.task_name is not None: result = metric.compute(predictions=preds, references=p.label_ids) if len(result) > 1: result["combined_score"] = np.mean(list(result.values())).item() return result elif is_regression: return {"mse": ((preds - p.label_ids) ** 2).mean().item()} else: return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. 
if data_args.pad_to_max_length: data_collator = default_data_collator elif training_args.fp16: data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) else: data_collator = None # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, ) # Training if training_args.do_train: checkpoint = None if last_checkpoint is not None: checkpoint = last_checkpoint elif os.path.isdir(model_args.model_name_or_path): # Check the config from that potential checkpoint has the right number of labels before using it as a # checkpoint. if AutoConfig.from_pretrained(model_args.model_name_or_path).num_labels == num_labels: checkpoint = model_args.model_name_or_path train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] eval_datasets = [eval_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") eval_datasets.append(datasets["validation_mismatched"]) for eval_dataset, task in zip(eval_datasets, tasks): metrics = trainer.evaluate(eval_dataset=eval_dataset) max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_val_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if training_args.do_predict: logger.info("*** Test ***") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] test_datasets = [test_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") test_datasets.append(datasets["test_mismatched"]) for test_dataset, task in zip(test_datasets, tasks): # Removing the `label` columns because it contains -1 and Trainer won't like that. test_dataset = test_dataset.remove_columns("label") predictions = trainer.predict(test_dataset=test_dataset).predictions predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1) output_test_file = os.path.join(training_args.output_dir, f"test_results_{task}.txt") if trainer.is_world_process_zero(): with open(output_test_file, "w") as writer: logger.info(f"***** Test results {task} *****") writer.write("index\tprediction\n") for index, item in enumerate(predictions): if is_regression: writer.write(f"{index}\t{item:3.3f}\n") else: item = label_list[item] writer.write(f"{index}\t{item}\n") def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
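# Illustrative sketch, not part of the original script: `main` above picks the data collator from two flags. With
# `--pad_to_max_length` every example was already padded during preprocessing, so the default collator is enough;
# under fp16, dynamic padding is rounded up to a multiple of 8 to keep tensor shapes efficient on modern GPUs. The
# helper below restates that decision in isolation; its name is invented for illustration and nothing here calls it.
def _example_pick_data_collator(tokenizer, pad_to_max_length, fp16):
    """Return the collator `main` would hand to the `Trainer` for the given flags."""
    if pad_to_max_length:
        # Inputs were already padded to `max_seq_length` in `preprocess_function`.
        return default_data_collator
    if fp16:
        # Pad dynamically per batch, rounded up to a multiple of 8.
        return DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    # With `None`, `Trainer` falls back to `DataCollatorWithPadding(tokenizer)`.
    return None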
transformers/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py/0
{ "file_path": "transformers/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py", "repo_id": "transformers", "token_count": 9577 }
464
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import json import tempfile try: from typing import Unpack except ImportError: from typing_extensions import Unpack import unittest import numpy as np from transformers import CLIPTokenizerFast, ProcessorMixin from transformers.models.auto.processing_auto import processor_class_from_name from transformers.testing_utils import ( check_json_file_has_correct_format, require_tokenizers, require_torch, require_vision, ) from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor @require_torch @require_vision @require_torch class ProcessorTesterMixin: processor_class = None def prepare_processor_dict(self): return {} def get_component(self, attribute, **kwargs): assert attribute in self.processor_class.attributes component_class_name = getattr(self.processor_class, f"{attribute}_class") if isinstance(component_class_name, tuple): component_class_name = component_class_name[0] component_class = processor_class_from_name(component_class_name) component = component_class.from_pretrained(self.tmpdirname, **kwargs) # noqa if attribute == "tokenizer" and not component.pad_token: component.pad_token = "[TEST_PAD]" return component def prepare_components(self): components = {} for attribute in self.processor_class.attributes: component = self.get_component(attribute) components[attribute] = component return components def get_processor(self): components = self.prepare_components() processor = self.processor_class(**components, **self.prepare_processor_dict()) return processor @require_vision def prepare_image_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. """ image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def test_processor_to_json_string(self): processor = self.get_processor() obj = json.loads(processor.to_json_string()) for key, value in self.prepare_processor_dict().items(): self.assertEqual(obj[key], value) self.assertEqual(getattr(processor, key, None), value) def test_processor_from_and_save_pretrained(self): processor_first = self.get_processor() with tempfile.TemporaryDirectory() as tmpdirname: saved_files = processor_first.save_pretrained(tmpdirname) if len(saved_files) > 0: check_json_file_has_correct_format(saved_files[0]) processor_second = self.processor_class.from_pretrained(tmpdirname) self.assertEqual(processor_second.to_dict(), processor_first.to_dict()) # These kwargs-related tests ensure that processors are correctly instantiated. # they need to be applied only if an image_processor exists. def skip_processor_without_typed_kwargs(self, processor): # TODO this signature check is to test only uniformized processors. 
# Once all are updated, remove it. is_kwargs_typed_dict = False call_signature = inspect.signature(processor.__call__) for param in call_signature.parameters.values(): if param.kind == param.VAR_KEYWORD and param.annotation != param.empty: is_kwargs_typed_dict = ( hasattr(param.annotation, "__origin__") and param.annotation.__origin__ == Unpack ) if not is_kwargs_typed_dict: self.skipTest(f"{self.processor_class} doesn't have typed kwargs.") @require_vision @require_torch def test_tokenizer_defaults_preserved_by_kwargs(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer", max_length=117, padding="max_length") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input, return_tensors="pt") self.assertEqual(len(inputs["input_ids"][0]), 117) @require_torch @require_vision def test_image_processor_defaults_preserved_by_image_kwargs(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor", size=(234, 234)) tokenizer = self.get_component("tokenizer", max_length=117, padding="max_length") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertEqual(len(inputs["pixel_values"][0][0]), 234) @require_vision @require_torch def test_kwargs_overrides_default_tokenizer_kwargs(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer", padding="longest") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor( text=input_str, images=image_input, return_tensors="pt", max_length=112, padding="max_length" ) self.assertEqual(len(inputs["input_ids"][0]), 112) @require_torch @require_vision def test_kwargs_overrides_default_image_processor_kwargs(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor", size=(234, 234)) tokenizer = self.get_component("tokenizer", max_length=117, padding="max_length") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input, size=[224, 224]) self.assertEqual(len(inputs["pixel_values"][0][0]), 224) @require_torch @require_vision def test_unstructured_kwargs(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = 
self.get_component("image_processor") tokenizer = self.get_component("tokenizer") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor( text=input_str, images=image_input, return_tensors="pt", size={"height": 214, "width": 214}, padding="max_length", max_length=76, ) self.assertEqual(inputs["pixel_values"].shape[2], 214) self.assertEqual(len(inputs["input_ids"][0]), 76) @require_torch @require_vision def test_unstructured_kwargs_batched(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = ["lower newer", "upper older longer string"] image_input = self.prepare_image_inputs() * 2 inputs = processor( text=input_str, images=image_input, return_tensors="pt", size={"height": 214, "width": 214}, padding="longest", max_length=76, ) self.assertEqual(inputs["pixel_values"].shape[2], 214) self.assertEqual(len(inputs["input_ids"][0]), 6) @require_torch @require_vision def test_doubly_passed_kwargs(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = ["lower newer"] image_input = self.prepare_image_inputs() with self.assertRaises(ValueError): _ = processor( text=input_str, images=image_input, images_kwargs={"size": {"height": 222, "width": 222}}, size={"height": 214, "width": 214}, ) @require_torch @require_vision def test_structured_kwargs_nested(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = "lower newer" image_input = self.prepare_image_inputs() # Define the kwargs for each modality all_kwargs = { "common_kwargs": {"return_tensors": "pt"}, "images_kwargs": {"size": {"height": 214, "width": 214}}, "text_kwargs": {"padding": "max_length", "max_length": 76}, } inputs = processor(text=input_str, images=image_input, **all_kwargs) self.skip_processor_without_typed_kwargs(processor) self.assertEqual(inputs["pixel_values"].shape[2], 214) self.assertEqual(len(inputs["input_ids"][0]), 76) @require_torch @require_vision def test_structured_kwargs_nested_from_dict(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = "lower newer" image_input = self.prepare_image_inputs() 
# Define the kwargs for each modality all_kwargs = { "common_kwargs": {"return_tensors": "pt"}, "images_kwargs": {"size": {"height": 214, "width": 214}}, "text_kwargs": {"padding": "max_length", "max_length": 76}, } inputs = processor(text=input_str, images=image_input, **all_kwargs) self.assertEqual(inputs["pixel_values"].shape[2], 214) self.assertEqual(len(inputs["input_ids"][0]), 76) class MyProcessor(ProcessorMixin): attributes = ["image_processor", "tokenizer"] image_processor_class = "CLIPImageProcessor" tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__(self, image_processor=None, tokenizer=None, processor_attr_1=1, processor_attr_2=True): super().__init__(image_processor, tokenizer) self.processor_attr_1 = processor_attr_1 self.processor_attr_2 = processor_attr_2 @require_tokenizers @require_vision class ProcessorTest(unittest.TestCase): processor_class = MyProcessor def prepare_processor_dict(self): return {"processor_attr_1": 1, "processor_attr_2": False} def get_processor(self): image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-large-patch14") tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-large-patch14") processor = MyProcessor(image_processor, tokenizer, **self.prepare_processor_dict()) return processor def test_processor_to_json_string(self): processor = self.get_processor() obj = json.loads(processor.to_json_string()) for key, value in self.prepare_processor_dict().items(): self.assertEqual(obj[key], value) self.assertEqual(getattr(processor, key, None), value) def test_processor_from_and_save_pretrained(self): processor_first = self.get_processor() with tempfile.TemporaryDirectory() as tmpdirname: saved_file = processor_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) processor_second = self.processor_class.from_pretrained(tmpdirname) self.assertEqual(processor_second.to_dict(), processor_first.to_dict())
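# Illustrative sketch, not used by the tests above: the `test_structured_kwargs_nested*` tests rely on processors
# accepting per-modality kwargs grouped under "common_kwargs", "images_kwargs" and "text_kwargs". The helper below
# shows, under that assumption, how such a nested dict is flattened for one modality; the function name is invented
# for illustration and nothing in this module calls it.
def _example_merge_structured_kwargs(all_kwargs, modality):
    """Merge `common_kwargs` with the kwargs of a single modality (e.g. "images_kwargs")."""
    merged = dict(all_kwargs.get("common_kwargs", {}))
    merged.update(all_kwargs.get(modality, {}))
    return merged


# For the nested kwargs used in the tests above, the images-specific call effectively receives:
# _example_merge_structured_kwargs(
#     {"common_kwargs": {"return_tensors": "pt"}, "images_kwargs": {"size": {"height": 214, "width": 214}}},
#     "images_kwargs",
# )  -> {"return_tensors": "pt", "size": {"height": 214, "width": 214}}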
transformers/tests/test_processing_common.py/0
{ "file_path": "transformers/tests/test_processing_common.py", "repo_id": "transformers", "token_count": 5765 }
465
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers.activations_tf import get_tf_activation @require_tf class TestTFActivations(unittest.TestCase): def test_gelu_10(self): x = tf.constant([-100, -1.0, -0.1, 0, 0.1, 1.0, 100.0]) gelu = get_tf_activation("gelu") gelu10 = get_tf_activation("gelu_10") y_gelu = gelu(x) y_gelu_10 = gelu10(x) clipped_mask = tf.where(y_gelu_10 < 10.0, 1.0, 0.0) self.assertEqual(tf.math.reduce_max(y_gelu_10).numpy().item(), 10.0) self.assertTrue(np.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask)) def test_get_activation(self): get_tf_activation("gelu") get_tf_activation("gelu_10") get_tf_activation("gelu_fast") get_tf_activation("gelu_new") get_tf_activation("glu") get_tf_activation("mish") get_tf_activation("quick_gelu") get_tf_activation("relu") get_tf_activation("sigmoid") get_tf_activation("silu") get_tf_activation("swish") get_tf_activation("tanh") with self.assertRaises(KeyError): get_tf_activation("bogus") with self.assertRaises(KeyError): get_tf_activation(None)
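# Illustrative sketch, not used by the tests above: `gelu_10` behaves like `gelu` with its output clipped to the
# range [-10, 10], which is what `test_gelu_10` checks. The helper below restates that relation directly; it assumes
# TensorFlow is installed and is never called by the test suite.
def _example_gelu_10_reference(x):
    gelu = get_tf_activation("gelu")
    # Clipping reproduces `gelu_10`; in practice only the upper bound matters, since GELU never drops below -1.
    return tf.clip_by_value(gelu(x), clip_value_min=-10.0, clip_value_max=10.0)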
transformers/tests/utils/test_activations_tf.py/0
{ "file_path": "transformers/tests/utils/test_activations_tf.py", "repo_id": "transformers", "token_count": 803 }
466
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import hf_hub_download from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) RANDOM_BERT = "hf-internal-testing/tiny-random-bert" TINY_BERT_PT_ONLY = "hf-internal-testing/tiny-bert-pt-only" CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert") FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6" GATED_REPO = "hf-internal-testing/dummy-gated-model" README_FILE = "README.md" class GetFromCacheTests(unittest.TestCase): def test_cached_file(self): archive_file = cached_file(RANDOM_BERT, CONFIG_NAME) # Should have downloaded the file in here self.assertTrue(os.path.isdir(CACHE_DIR)) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder))) with open(os.path.join(CACHE_DIR, "refs", "main")) as f: main_commit = f.read() self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME)) self.assertTrue(os.path.isfile(archive_file)) # File is cached at the same place the second time. new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME) self.assertEqual(archive_file, new_archive_file) # Using a specific revision to test the full commit hash. 
archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223") self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME)) def test_cached_file_errors(self): with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"): _ = cached_file("tiny-random-bert", CONFIG_NAME) with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"): _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa") with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"): _ = cached_file(RANDOM_BERT, "conf") def test_non_existence_is_cached(self): with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"): _ = cached_file(RANDOM_BERT, "conf") with open(os.path.join(CACHE_DIR, "refs", "main")) as f: main_commit = f.read() self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf"))) path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False) self.assertIsNone(path) path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False) self.assertIsNone(path) response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False) self.assertIsNone(path) # This check we did call the fake head request mock_head.assert_called() def test_has_file(self): self.assertTrue(has_file(TINY_BERT_PT_ONLY, WEIGHTS_NAME)) self.assertFalse(has_file(TINY_BERT_PT_ONLY, TF2_WEIGHTS_NAME)) self.assertFalse(has_file(TINY_BERT_PT_ONLY, FLAX_WEIGHTS_NAME)) def test_has_file_in_cache(self): with tempfile.TemporaryDirectory() as tmp_dir: # Empty cache dir + offline mode => return False assert not has_file(TINY_BERT_PT_ONLY, WEIGHTS_NAME, local_files_only=True, cache_dir=tmp_dir) # Populate cache dir hf_hub_download(TINY_BERT_PT_ONLY, WEIGHTS_NAME, cache_dir=tmp_dir) # Cache dir + offline mode => return True assert has_file(TINY_BERT_PT_ONLY, WEIGHTS_NAME, local_files_only=True, cache_dir=tmp_dir) def test_get_file_from_repo_distant(self): # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo("google-bert/bert-base-cased", "ahah.txt")) # The function raises if the repository does not exist. with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"): get_file_from_repo("bert-base-case", CONFIG_NAME) # The function raises if the revision does not exist. with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"): get_file_from_repo("google-bert/bert-base-cased", CONFIG_NAME, revision="ahaha") resolved_file = get_file_from_repo("google-bert/bert-base-cased", CONFIG_NAME) # The name is the cached name which is not very easy to test, so instead we load the content. 
config = json.loads(open(resolved_file, "r").read()) self.assertEqual(config["hidden_size"], 768) def test_get_file_from_repo_local(self): with tempfile.TemporaryDirectory() as tmp_dir: filename = Path(tmp_dir) / "a.txt" filename.touch() self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename)) self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt")) def test_get_file_gated_repo(self): """Test download file from a gated repo fails with correct message when not authenticated.""" with self.assertRaisesRegex(EnvironmentError, "You are trying to access a gated repo."): # All files except README.md are protected on a gated repo. cached_file(GATED_REPO, "gated_file.txt", token=False) def test_has_file_gated_repo(self): """Test check file existence from a gated repo fails with correct message when not authenticated.""" with self.assertRaisesRegex(EnvironmentError, "is a gated repository"): # All files except README.md are protected on a gated repo. has_file(GATED_REPO, "gated_file.txt", token=False)
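# Illustrative sketch, not used by the tests above: a typical way to combine the helpers exercised in this file.
# `has_file` only checks for existence, while `cached_file` downloads the file into the local cache and returns its
# path (raising by default when the entry is missing). The repo id reuses the tiny test model defined above; the
# helper itself is an invention for illustration and nothing in this module calls it.
def _example_resolve_config(repo_id=TINY_BERT_PT_ONLY):
    config_path = cached_file(repo_id, CONFIG_NAME)
    with open(config_path, "r", encoding="utf-8") as f:
        config = json.load(f)
    # Checking for a weights file first avoids raising on repos that only ship some formats.
    return config, has_file(repo_id, WEIGHTS_NAME)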
transformers/tests/utils/test_hub_utils.py/0
{ "file_path": "transformers/tests/utils/test_hub_utils.py", "repo_id": "transformers", "token_count": 2860 }
467
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A script to add and/or update the attribute `pipeline_model_mapping` in model test files. This script will be (mostly) used in the following 2 situations: - run within a (scheduled) CI job to: - check if model test files in the library have updated `pipeline_model_mapping`, - and/or update test files and (possibly) open a GitHub pull request automatically - being run by a `transformers` member to quickly check and update some particular test file(s) This script is **NOT** intended to be run (manually) by community contributors. """ import argparse import glob import inspect import os import re import unittest from get_test_info import get_test_classes from tests.test_pipeline_mixin import pipeline_test_mapping PIPELINE_TEST_MAPPING = {} for task, _ in pipeline_test_mapping.items(): PIPELINE_TEST_MAPPING[task] = {"pt": None, "tf": None} # DO **NOT** add item to this set (unless the reason is approved) TEST_FILE_TO_IGNORE = { "tests/models/esm/test_modeling_esmfold.py", # The pipeline test mapping is added to `test_modeling_esm.py` } def get_framework(test_class): """Infer the framework from the test class `test_class`.""" if "ModelTesterMixin" in [x.__name__ for x in test_class.__bases__]: return "pt" elif "TFModelTesterMixin" in [x.__name__ for x in test_class.__bases__]: return "tf" elif "FlaxModelTesterMixin" in [x.__name__ for x in test_class.__bases__]: return "flax" else: return None def get_mapping_for_task(task, framework): """Get mappings defined in `XXXPipelineTests` for the task `task`.""" # Use the cached results if PIPELINE_TEST_MAPPING[task].get(framework, None) is not None: return PIPELINE_TEST_MAPPING[task][framework] pipeline_test_class = pipeline_test_mapping[task]["test"] mapping = None if framework == "pt": mapping = getattr(pipeline_test_class, "model_mapping", None) elif framework == "tf": mapping = getattr(pipeline_test_class, "tf_model_mapping", None) if mapping is not None: mapping = dict(mapping.items()) # cache the results PIPELINE_TEST_MAPPING[task][framework] = mapping return mapping def get_model_for_pipeline_test(test_class, task): """Get the model architecture(s) related to the test class `test_class` for a pipeline `task`.""" framework = get_framework(test_class) if framework is None: return None mapping = get_mapping_for_task(task, framework) if mapping is None: return None config_classes = list({model_class.config_class for model_class in test_class.all_model_classes}) if len(config_classes) != 1: raise ValueError("There should be exactly one configuration class from `test_class.all_model_classes`.") # This could be a list/tuple of model classes, but it's rare. 
model_class = mapping.get(config_classes[0], None) if isinstance(model_class, (tuple, list)): model_class = sorted(model_class, key=lambda x: x.__name__) return model_class def get_pipeline_model_mapping(test_class): """Get `pipeline_model_mapping` for `test_class`.""" mapping = [(task, get_model_for_pipeline_test(test_class, task)) for task in pipeline_test_mapping] mapping = sorted([(task, model) for task, model in mapping if model is not None], key=lambda x: x[0]) return dict(mapping) def get_pipeline_model_mapping_string(test_class): """Get `pipeline_model_mapping` for `test_class` as a string (to be added to the test file). This will be a 1-line string. After this is added to a test file, `make style` will format it beautifully. """ framework = get_framework(test_class) if framework == "pt": framework = "torch" default_value = "{}" mapping = get_pipeline_model_mapping(test_class) if len(mapping) == 0: return "" texts = [] for task, model_classes in mapping.items(): if isinstance(model_classes, (tuple, list)): # A list/tuple of model classes value = "(" + ", ".join([x.__name__ for x in model_classes]) + ")" else: # A single model class value = model_classes.__name__ texts.append(f'"{task}": {value}') text = "{" + ", ".join(texts) + "}" text = f"pipeline_model_mapping = {text} if is_{framework}_available() else {default_value}" return text def is_valid_test_class(test_class): """Restrict to `XXXModelTesterMixin` and should be a subclass of `unittest.TestCase`.""" base_class_names = {"ModelTesterMixin", "TFModelTesterMixin", "FlaxModelTesterMixin"} if not issubclass(test_class, unittest.TestCase): return False return len(base_class_names.intersection([x.__name__ for x in test_class.__bases__])) > 0 def find_test_class(test_file): """Find a test class in `test_file` to which we will add `pipeline_model_mapping`.""" test_classes = [x for x in get_test_classes(test_file) if is_valid_test_class(x)] target_test_class = None for test_class in test_classes: # If a test class has defined `pipeline_model_mapping`, let's take it if getattr(test_class, "pipeline_model_mapping", None) is not None: target_test_class = test_class break # Take the test class with the shortest name (just a heuristic) if target_test_class is None and len(test_classes) > 0: target_test_class = sorted(test_classes, key=lambda x: (len(x.__name__), x.__name__))[0] return target_test_class def find_block_ending(lines, start_idx, indent_level): end_idx = start_idx for idx, line in enumerate(lines[start_idx:]): indent = len(line) - len(line.lstrip()) if idx == 0 or indent > indent_level or (indent == indent_level and line.strip() == ")"): end_idx = start_idx + idx elif idx > 0 and indent <= indent_level: # Outside the definition block of `pipeline_model_mapping` break return end_idx def add_pipeline_model_mapping(test_class, overwrite=False): """Add `pipeline_model_mapping` to `test_class`.""" if getattr(test_class, "pipeline_model_mapping", None) is not None: if not overwrite: return "", -1 line_to_add = get_pipeline_model_mapping_string(test_class) if len(line_to_add) == 0: return "", -1 line_to_add = line_to_add + "\n" # The code defined the class `test_class` class_lines, class_start_line_no = inspect.getsourcelines(test_class) # `inspect` gives the code for an object, including decorator(s) if any. # We (only) need the exact line of the class definition. 
for idx, line in enumerate(class_lines): if line.lstrip().startswith("class "): class_lines = class_lines[idx:] class_start_line_no += idx break class_end_line_no = class_start_line_no + len(class_lines) - 1 # The index in `class_lines` that starts the definition of `all_model_classes`, `all_generative_model_classes` or # `pipeline_model_mapping`. This assumes they are defined in such order, and we take the start index of the last # block that appears in a `test_class`. start_idx = None # The indent level of the line at `class_lines[start_idx]` (if defined) indent_level = 0 # To record if `pipeline_model_mapping` is found in `test_class`. def_line = None for idx, line in enumerate(class_lines): if line.strip().startswith("all_model_classes = "): indent_level = len(line) - len(line.lstrip()) start_idx = idx elif line.strip().startswith("all_generative_model_classes = "): indent_level = len(line) - len(line.lstrip()) start_idx = idx elif line.strip().startswith("pipeline_model_mapping = "): indent_level = len(line) - len(line.lstrip()) start_idx = idx def_line = line break if start_idx is None: return "", -1 # Find the ending index (inclusive) of the above found block. end_idx = find_block_ending(class_lines, start_idx, indent_level) # Extract `is_xxx_available()` from existing blocks: some models require specific libraries like `timm` and use # `is_timm_available()` instead of `is_torch_available()`. # Keep leading and trailing whitespaces r = re.compile(r"\s(is_\S+?_available\(\))\s") for line in class_lines[start_idx : end_idx + 1]: backend_condition = r.search(line) if backend_condition is not None: # replace the leading and trailing whitespaces to the space character " ". target = " " + backend_condition[0][1:-1] + " " line_to_add = r.sub(target, line_to_add) break if def_line is None: # `pipeline_model_mapping` is not defined. The target index is set to the ending index (inclusive) of # `all_model_classes` or `all_generative_model_classes`. target_idx = end_idx else: # `pipeline_model_mapping` is defined. The target index is set to be one **BEFORE** its start index. target_idx = start_idx - 1 # mark the lines of the currently existing `pipeline_model_mapping` to be removed. for idx in range(start_idx, end_idx + 1): # These lines are going to be removed before writing to the test file. class_lines[idx] = None # noqa # Make sure the test class is a subclass of `PipelineTesterMixin`. parent_classes = [x.__name__ for x in test_class.__bases__] if "PipelineTesterMixin" not in parent_classes: # Put `PipelineTesterMixin` just before `unittest.TestCase` _parent_classes = [x for x in parent_classes if x != "TestCase"] + ["PipelineTesterMixin"] if "TestCase" in parent_classes: # Here we **assume** the original string is always with `unittest.TestCase`. _parent_classes.append("unittest.TestCase") parent_classes = ", ".join(_parent_classes) for idx, line in enumerate(class_lines): # Find the ending of the declaration of `test_class` if line.strip().endswith("):"): # mark the lines of the declaration of `test_class` to be removed for _idx in range(idx + 1): class_lines[_idx] = None # noqa break # Add the new, one-line, class declaration for `test_class` class_lines[0] = f"class {test_class.__name__}({parent_classes}):\n" # Add indentation line_to_add = " " * indent_level + line_to_add # Insert `pipeline_model_mapping` to `class_lines`. # (The line at `target_idx` should be kept by definition!) 
    class_lines = class_lines[: target_idx + 1] + [line_to_add] + class_lines[target_idx + 1 :]

    # Remove the lines that are marked to be removed
    class_lines = [x for x in class_lines if x is not None]

    # Move from test class to module (in order to write to the test file)
    module_lines = inspect.getsourcelines(inspect.getmodule(test_class))[0]
    # Be careful with the 1-off between line numbers and array indices
    module_lines = module_lines[: class_start_line_no - 1] + class_lines + module_lines[class_end_line_no:]
    code = "".join(module_lines)

    module_file = inspect.getsourcefile(test_class)
    with open(module_file, "w", encoding="UTF-8", newline="\n") as fp:
        fp.write(code)

    return line_to_add


def add_pipeline_model_mapping_to_test_file(test_file, overwrite=False):
    """Add `pipeline_model_mapping` to `test_file`."""
    test_class = find_test_class(test_file)
    if test_class:
        add_pipeline_model_mapping(test_class, overwrite=overwrite)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--test_file", type=str, help="A path to the test file, starting with the repository's `tests` directory."
    )
    parser.add_argument(
        "--all",
        action="store_true",
        help="Whether to check and modify all test files.",
    )
    parser.add_argument(
        "--overwrite",
        action="store_true",
        help="Whether to overwrite a test class if it has already defined `pipeline_model_mapping`.",
    )
    args = parser.parse_args()

    if not args.all and not args.test_file:
        raise ValueError("Please specify either `test_file` or pass `--all` to check/modify all test files.")
    elif args.all and args.test_file:
        raise ValueError("Only one of `--test_file` and `--all` can be specified.")

    test_files = []
    if args.test_file:
        test_files = [args.test_file]
    else:
        pattern = os.path.join("tests", "models", "**", "test_modeling_*.py")
        for test_file in glob.glob(pattern):
            # `Flax` is not covered at the moment, so skip its modeling test files.
            if not os.path.basename(test_file).startswith("test_modeling_flax_"):
                test_files.append(test_file)

    for test_file in test_files:
        if test_file in TEST_FILE_TO_IGNORE:
            print(f"[SKIPPED] {test_file} is skipped as it is in `TEST_FILE_TO_IGNORE` in the file {__file__}.")
            continue
        add_pipeline_model_mapping_to_test_file(test_file, overwrite=args.overwrite)
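# Illustrative sketch, not used by this script: for an already-resolved task-to-model mapping,
# `get_pipeline_model_mapping_string` above emits a one-line attribute of the shape rebuilt by the simplified helper
# below. The helper ignores tuples of model classes and the backend-specific `is_xxx_available()` substitution that
# the real code handles; its name and the BERT classes in the usage comment are examples only.
def _example_mapping_string(task_to_model, framework="torch"):
    body = "{" + ", ".join(f'"{task}": {model.__name__}' for task, model in sorted(task_to_model.items())) + "}"
    return f"pipeline_model_mapping = {body} if is_{framework}_available() else {{}}"


# e.g. _example_mapping_string({"feature-extraction": BertModel, "text-classification": BertForSequenceClassification})
# -> 'pipeline_model_mapping = {"feature-extraction": BertModel, "text-classification": BertForSequenceClassification} if is_torch_available() else {}'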
transformers/utils/add_pipeline_model_mapping_to_test.py/0
{ "file_path": "transformers/utils/add_pipeline_model_mapping_to_test.py", "repo_id": "transformers", "token_count": 5411 }
468
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import collections.abc import copy import inspect import json import multiprocessing import os import shutil import tempfile import traceback from pathlib import Path from check_config_docstrings import get_checkpoint_from_config_class from datasets import load_dataset from get_test_info import get_model_to_tester_mapping, get_tester_classes_for_model from huggingface_hub import Repository, create_repo, hf_api, upload_folder from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, IMAGE_PROCESSOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoTokenizer, LayoutLMv3TokenizerFast, PreTrainedTokenizer, PreTrainedTokenizerFast, logging, ) from transformers.feature_extraction_utils import FeatureExtractionMixin from transformers.file_utils import is_tf_available, is_torch_available from transformers.image_processing_utils import BaseImageProcessor from transformers.models.auto.configuration_auto import AutoConfig, model_type_to_module_name from transformers.models.fsmt import configuration_fsmt from transformers.processing_utils import ProcessorMixin, transformers_module from transformers.tokenization_utils_base import PreTrainedTokenizerBase # make sure tokenizer plays nice with multiprocessing os.environ["TOKENIZERS_PARALLELISM"] = "false" logging.set_verbosity_error() logging.disable_progress_bar() logger = logging.get_logger(__name__) os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" if not is_torch_available(): raise ValueError("Please install PyTorch.") if not is_tf_available(): raise ValueError("Please install TensorFlow.") FRAMEWORKS = ["pytorch", "tensorflow"] INVALID_ARCH = [] TARGET_VOCAB_SIZE = 1024 data = {"training_ds": None, "testing_ds": None} COMPOSITE_MODELS = { "EncoderDecoderModel": "EncoderDecoderModel-bert-bert", "SpeechEncoderDecoderModel": "SpeechEncoderDecoderModel-wav2vec2-bert", "VisionEncoderDecoderModel": "VisionEncoderDecoderModel-vit-gpt2", "VisionTextDualEncoderModel": "VisionTextDualEncoderModel-vit-bert", } # This list contains the model architectures for which a tiny version could not be created. # Avoid to add new architectures here - unless we have verified carefully that it's (almost) impossible to create them. # One such case is: no model tester class is implemented for a model type (like `MT5`) because its architecture is # identical to another one (`MT5` is based on `T5`), but trained on different datasets or with different techniques. 
UNCONVERTIBLE_MODEL_ARCHITECTURES = { "BertGenerationEncoder", "BertGenerationDecoder", "CamembertForSequenceClassification", "CamembertForMultipleChoice", "CamembertForMaskedLM", "CamembertForCausalLM", "CamembertForTokenClassification", "CamembertForQuestionAnswering", "CamembertModel", "TFCamembertForMultipleChoice", "TFCamembertForTokenClassification", "TFCamembertForQuestionAnswering", "TFCamembertForSequenceClassification", "TFCamembertForMaskedLM", "TFCamembertModel", "TFCamembertForCausalLM", "DecisionTransformerModel", "GraphormerModel", "InformerModel", "JukeboxModel", "MarianForCausalLM", "MaskFormerSwinModel", "MaskFormerSwinBackbone", "MT5Model", "MT5ForConditionalGeneration", "UMT5ForConditionalGeneration", "TFMT5ForConditionalGeneration", "TFMT5Model", "QDQBertForSequenceClassification", "QDQBertForMaskedLM", "QDQBertModel", "QDQBertForTokenClassification", "QDQBertLMHeadModel", "QDQBertForMultipleChoice", "QDQBertForQuestionAnswering", "QDQBertForNextSentencePrediction", "ReformerModelWithLMHead", "RetriBertModel", "Speech2Text2ForCausalLM", "TimeSeriesTransformerModel", "TrajectoryTransformerModel", "TrOCRForCausalLM", "XLMProphetNetForConditionalGeneration", "XLMProphetNetForCausalLM", "XLMProphetNetModel", "XLMRobertaModel", "XLMRobertaForTokenClassification", "XLMRobertaForMultipleChoice", "XLMRobertaForMaskedLM", "XLMRobertaForCausalLM", "XLMRobertaForSequenceClassification", "XLMRobertaForQuestionAnswering", "TFXLMRobertaForSequenceClassification", "TFXLMRobertaForMaskedLM", "TFXLMRobertaForCausalLM", "TFXLMRobertaForQuestionAnswering", "TFXLMRobertaModel", "TFXLMRobertaForMultipleChoice", "TFXLMRobertaForTokenClassification", } def get_processor_types_from_config_class(config_class, allowed_mappings=None): """Return a tuple of processors for `config_class`. We use `tuple` here to include (potentially) both slow & fast tokenizers. """ # To make a uniform return type def _to_tuple(x): if not isinstance(x, collections.abc.Sequence): x = (x,) else: x = tuple(x) return x if allowed_mappings is None: allowed_mappings = ["processor", "tokenizer", "image_processor", "feature_extractor"] processor_types = () # Check first if a model has `ProcessorMixin`. Otherwise, check if it has tokenizers, and/or an image processor or # a feature extractor if config_class in PROCESSOR_MAPPING and "processor" in allowed_mappings: processor_types = _to_tuple(PROCESSOR_MAPPING[config_class]) else: if config_class in TOKENIZER_MAPPING and "tokenizer" in allowed_mappings: processor_types = TOKENIZER_MAPPING[config_class] if config_class in IMAGE_PROCESSOR_MAPPING and "image_processor" in allowed_mappings: processor_types += _to_tuple(IMAGE_PROCESSOR_MAPPING[config_class]) elif config_class in FEATURE_EXTRACTOR_MAPPING and "feature_extractor" in allowed_mappings: processor_types += _to_tuple(FEATURE_EXTRACTOR_MAPPING[config_class]) # Remark: some configurations have no processor at all. For example, generic composite models like # `EncoderDecoderModel` is used for any (compatible) text models. Also, `DecisionTransformer` doesn't # require any processor. # We might get `None` for some tokenizers - remove them here. processor_types = tuple(p for p in processor_types if p is not None) return processor_types def get_architectures_from_config_class(config_class, arch_mappings, models_to_skip=None): """Return a tuple of all possible architectures attributed to a configuration class `config_class`. For example, BertConfig -> [BertModel, BertForMaskedLM, ..., BertForQuestionAnswering]. 
""" # A model architecture could appear in several mappings. For example, `BartForConditionalGeneration` is in # - MODEL_FOR_PRETRAINING_MAPPING_NAMES # - MODEL_WITH_LM_HEAD_MAPPING_NAMES # - MODEL_FOR_MASKED_LM_MAPPING_NAMES # - MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES # We avoid the duplication. architectures = set() if models_to_skip is None: models_to_skip = [] models_to_skip = UNCONVERTIBLE_MODEL_ARCHITECTURES.union(models_to_skip) for mapping in arch_mappings: if config_class in mapping: models = mapping[config_class] models = tuple(models) if isinstance(models, collections.abc.Sequence) else (models,) for model in models: if model.__name__ not in models_to_skip: architectures.add(model) architectures = tuple(architectures) return architectures def get_config_class_from_processor_class(processor_class): """Get the config class from a processor class. Some config/model classes use tokenizers/feature_extractors from other models. For example, `GPT-J` uses `GPT2Tokenizer`. If no checkpoint is found for a config class, or a checkpoint is found without necessary file(s) to create the processor for `processor_class`, we get the config class that corresponds to `processor_class` and use it to find a checkpoint in order to create the processor. """ processor_prefix = processor_class.__name__ for postfix in ["TokenizerFast", "Tokenizer", "ImageProcessor", "FeatureExtractor", "Processor"]: processor_prefix = processor_prefix.replace(postfix, "") # `Wav2Vec2CTCTokenizer` -> `Wav2Vec2Config` if processor_prefix == "Wav2Vec2CTC": processor_prefix = "Wav2Vec2" # Find the new configuration class new_config_name = f"{processor_prefix}Config" new_config_class = getattr(transformers_module, new_config_name) return new_config_class def build_processor(config_class, processor_class, allow_no_checkpoint=False): """Create a processor for `processor_class`. If a processor is not able to be built with the original arguments, this method tries to change the arguments and call itself recursively, by inferring a new `config_class` or a new `processor_class` from another one, in order to find a checkpoint containing the necessary files to build a processor. The processor is not saved here. Instead, it will be saved in `convert_processors` after further changes in `convert_processors`. For each model architecture`, a copy will be created and saved along the built model. """ # Currently, this solely uses the docstring in the source file of `config_class` to find a checkpoint. checkpoint = get_checkpoint_from_config_class(config_class) if checkpoint is None: # try to get the checkpoint from the config class for `processor_class`. # This helps cases like `XCLIPConfig` and `VideoMAEFeatureExtractor` to find a checkpoint from `VideoMAEConfig`. config_class_from_processor_class = get_config_class_from_processor_class(processor_class) checkpoint = get_checkpoint_from_config_class(config_class_from_processor_class) processor = None try: processor = processor_class.from_pretrained(checkpoint) except Exception as e: logger.error(f"{e.__class__.__name__}: {e}") # Try to get a new processor class from checkpoint. This is helpful for a checkpoint without necessary file to load # processor while `processor_class` is an Auto class. For example, `sew` has `Wav2Vec2Processor` in # `PROCESSOR_MAPPING_NAMES`, its `tokenizer_class` is `AutoTokenizer`, and the checkpoint # `https://huggingface.co/asapp/sew-tiny-100k` has no tokenizer file, but we can get # `tokenizer_class: Wav2Vec2CTCTokenizer` from the config file. 
(The new processor class won't be able to load from # `checkpoint`, but it helps this recursive method to find a way to build a processor). if ( processor is None and checkpoint is not None and issubclass(processor_class, (PreTrainedTokenizerBase, AutoTokenizer)) ): try: config = AutoConfig.from_pretrained(checkpoint) except Exception as e: logger.error(f"{e.__class__.__name__}: {e}") config = None if config is not None: if not isinstance(config, config_class): raise ValueError( f"`config` (which is of type {config.__class__.__name__}) should be an instance of `config_class`" f" ({config_class.__name__})!" ) tokenizer_class = config.tokenizer_class new_processor_class = None if tokenizer_class is not None: new_processor_class = getattr(transformers_module, tokenizer_class) if new_processor_class != processor_class: processor = build_processor(config_class, new_processor_class) # If `tokenizer_class` is not specified in `config`, let's use `config` to get the process class via auto # mappings, but only allow the tokenizer mapping being used. This is to make `Wav2Vec2Conformer` build if processor is None: new_processor_classes = get_processor_types_from_config_class( config.__class__, allowed_mappings=["tokenizer"] ) # Used to avoid infinite recursion between a pair of fast/slow tokenizer types names = [ x.__name__.replace("Fast", "") for x in [processor_class, new_processor_class] if x is not None ] new_processor_classes = [ x for x in new_processor_classes if x is not None and x.__name__.replace("Fast", "") not in names ] if len(new_processor_classes) > 0: new_processor_class = new_processor_classes[0] # Let's use fast tokenizer if there is any for x in new_processor_classes: if x.__name__.endswith("Fast"): new_processor_class = x break processor = build_processor(config_class, new_processor_class) if processor is None: # Try to build each component (tokenizer & feature extractor) of a `ProcessorMixin`. if issubclass(processor_class, ProcessorMixin): attrs = {} for attr_name in processor_class.attributes: attrs[attr_name] = [] # This could be a tuple (for tokenizers). For example, `CLIPProcessor` has # - feature_extractor_class = "CLIPFeatureExtractor" # - tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast") attr_class_names = getattr(processor_class, f"{attr_name}_class") if not isinstance(attr_class_names, tuple): attr_class_names = (attr_class_names,) for name in attr_class_names: attr_class = getattr(transformers_module, name) attr = build_processor(config_class, attr_class) if attr is not None: attrs[attr_name].append(attr) # try to build a `ProcessorMixin`, so we can return a single value if all(len(v) > 0 for v in attrs.values()): try: processor = processor_class(**{k: v[0] for k, v in attrs.items()}) except Exception as e: logger.error(f"{e.__class__.__name__}: {e}") else: # `checkpoint` might lack some file(s) to load a processor. For example, `facebook/hubert-base-ls960` # has no tokenizer file to load `Wav2Vec2CTCTokenizer`. In this case, we try to build a processor # with the configuration class (for example, `Wav2Vec2Config`) corresponding to `processor_class`. 
config_class_from_processor_class = get_config_class_from_processor_class(processor_class) if config_class_from_processor_class != config_class: processor = build_processor(config_class_from_processor_class, processor_class) # Try to create an image processor or a feature extractor without any checkpoint if ( processor is None and allow_no_checkpoint and (issubclass(processor_class, BaseImageProcessor) or issubclass(processor_class, FeatureExtractionMixin)) ): try: processor = processor_class() except Exception as e: logger.error(f"{e.__class__.__name__}: {e}") # validation if processor is not None: if not (isinstance(processor, processor_class) or processor_class.__name__.startswith("Auto")): raise ValueError( f"`processor` (which is of type {processor.__class__.__name__}) should be an instance of" f" {processor_class.__name__} or an Auto class!" ) return processor def get_tiny_config(config_class, model_class=None, **model_tester_kwargs): """Retrieve a tiny configuration from `config_class` using each model's `ModelTester`. Args: config_class: Subclass of `PreTrainedConfig`. Returns: An instance of `config_class` with tiny hyperparameters """ model_type = config_class.model_type # For model type like `data2vec-vision` and `donut-swin`, we can't get the config/model file name directly via # `model_type` as it would be sth. like `configuration_data2vec_vision.py`. # A simple way is to use `inspect.getsourcefile(config_class)`. config_source_file = inspect.getsourcefile(config_class) # The modeling file name without prefix (`modeling_`) and postfix (`.py`) modeling_name = config_source_file.split(os.path.sep)[-1].replace("configuration_", "").replace(".py", "") try: print("Importing", model_type_to_module_name(model_type)) module_name = model_type_to_module_name(model_type) if not modeling_name.startswith(module_name): raise ValueError(f"{modeling_name} doesn't start with {module_name}!") test_file = os.path.join("tests", "models", module_name, f"test_modeling_{modeling_name}.py") models_to_model_testers = get_model_to_tester_mapping(test_file) # Find the model tester class model_tester_class = None tester_classes = [] if model_class is not None: tester_classes = get_tester_classes_for_model(test_file, model_class) else: for _tester_classes in models_to_model_testers.values(): tester_classes.extend(_tester_classes) if len(tester_classes) > 0: # sort with the length of the class names first, then the alphabetical order # This is to avoid `T5EncoderOnlyModelTest` is used instead of `T5ModelTest`, which has # `is_encoder_decoder=False` and causes some pipeline tests failing (also failures in `Optimum` CI). # TODO: More fine grained control of the desired tester class. model_tester_class = sorted(tester_classes, key=lambda x: (len(x.__name__), x.__name__))[0] except ModuleNotFoundError: error = f"Tiny config not created for {model_type} - cannot find the testing module from the model name." raise ValueError(error) if model_tester_class is None: error = f"Tiny config not created for {model_type} - no model tester is found in the testing module." raise ValueError(error) # CLIP-like models have `text_model_tester` and `vision_model_tester`, and we need to pass `vocab_size` to # `text_model_tester` via `text_kwargs`. The same trick is also necessary for `Flava`. 
if "vocab_size" in model_tester_kwargs: if "text_kwargs" in inspect.signature(model_tester_class.__init__).parameters.keys(): vocab_size = model_tester_kwargs.pop("vocab_size") model_tester_kwargs["text_kwargs"] = {"vocab_size": vocab_size} # `parent` is an instance of `unittest.TestCase`, but we don't need it here. model_tester = model_tester_class(parent=None, **model_tester_kwargs) if hasattr(model_tester, "get_pipeline_config"): config = model_tester.get_pipeline_config() elif hasattr(model_tester, "prepare_config_and_inputs"): # `PoolFormer` has no `get_config` defined. Furthermore, it's better to use `prepare_config_and_inputs` even if # `get_config` is defined, since there might be some extra changes in `prepare_config_and_inputs`. config = model_tester.prepare_config_and_inputs()[0] elif hasattr(model_tester, "get_config"): config = model_tester.get_config() else: error = ( f"Tiny config not created for {model_type} - the model tester {model_tester_class.__name__} lacks" " necessary method to create config." ) raise ValueError(error) # make sure this is long enough (some model tester has `20` for this attr.) to pass `text-generation` # pipeline tests. max_positions = [] for key in ["max_position_embeddings", "max_source_positions", "max_target_positions"]: if getattr(config, key, 0) > 0: max_positions.append(getattr(config, key)) if getattr(config, "text_config", None) is not None: if getattr(config.text_config, key, None) is not None: max_positions.append(getattr(config.text_config, key)) if len(max_positions) > 0: max_position = max(200, min(max_positions)) for key in ["max_position_embeddings", "max_source_positions", "max_target_positions"]: if getattr(config, key, 0) > 0: setattr(config, key, max_position) if getattr(config, "text_config", None) is not None: if getattr(config.text_config, key, None) is not None: setattr(config.text_config, key, max_position) return config def convert_tokenizer(tokenizer_fast: PreTrainedTokenizerFast): new_tokenizer = tokenizer_fast.train_new_from_iterator( data["training_ds"]["text"], TARGET_VOCAB_SIZE, show_progress=False ) # Make sure it at least runs if not isinstance(new_tokenizer, LayoutLMv3TokenizerFast): new_tokenizer(data["testing_ds"]["text"]) return new_tokenizer def convert_feature_extractor(feature_extractor, tiny_config): to_convert = False kwargs = {} if hasattr(tiny_config, "image_size"): kwargs["size"] = tiny_config.image_size kwargs["crop_size"] = tiny_config.image_size to_convert = True elif ( hasattr(tiny_config, "vision_config") and tiny_config.vision_config is not None and hasattr(tiny_config.vision_config, "image_size") ): kwargs["size"] = tiny_config.vision_config.image_size kwargs["crop_size"] = tiny_config.vision_config.image_size to_convert = True # Speech2TextModel specific. if hasattr(tiny_config, "input_feat_per_channel"): kwargs["feature_size"] = tiny_config.input_feat_per_channel kwargs["num_mel_bins"] = tiny_config.input_feat_per_channel to_convert = True if to_convert: feature_extractor = feature_extractor.__class__(**kwargs) # Sanity check: on tiny image feature extractors, a large image size results in slow CI -- up to the point where it # can result in timeout issues. 
if ( isinstance(feature_extractor, BaseImageProcessor) and hasattr(feature_extractor, "size") and isinstance(feature_extractor.size, dict) ): largest_image_size = max(feature_extractor.size.values()) if largest_image_size > 64: # hardcoded exceptions models_with_large_image_size = ("deformable_detr", "flava", "grounding_dino", "mgp_str", "swiftformer") if any(model_name in tiny_config.model_type for model_name in models_with_large_image_size): pass else: raise ValueError( f"Image size of {tiny_config.model_type} is too large ({feature_extractor.size}). " "Please reduce it to 64 or less on each dimension. The following steps are usually the " "easiest solution: 1) confirm that you're setting `image_size` in your ModelTester class; " "2) ensure that it gets passed to the tester config init, `get_config()`." ) return feature_extractor def convert_processors(processors, tiny_config, output_folder, result): """Change a processor to work with smaller inputs. For tokenizers, we try to reduce their vocabulary size. For feature extractor, we use smaller image size or change other attributes using the values from `tiny_config`. See `convert_feature_extractor`. This method should not fail: we catch the errors and put them in `result["warnings"]` with descriptive messages. """ def _sanity_check(fast_tokenizer, slow_tokenizer, keep_fast_tokenizer=False): """Set tokenizer(s) to `None` if the fast/slow tokenizers have different values for `vocab_size` or `length`. If `keep_fast_tokenizer=True`, the fast tokenizer will be kept. """ # sanity check 1: fast and slow tokenizers should be compatible (vocab_size) if fast_tokenizer is not None and slow_tokenizer is not None: if fast_tokenizer.vocab_size != slow_tokenizer.vocab_size: warning_message = ( "The fast/slow tokenizers " f"({fast_tokenizer.__class__.__name__}/{slow_tokenizer.__class__.__name__}) have different " "vocabulary size: " f"fast_tokenizer.vocab_size = {fast_tokenizer.vocab_size} and " f"slow_tokenizer.vocab_size = {slow_tokenizer.vocab_size}." ) result["warnings"].append(warning_message) if not keep_fast_tokenizer: fast_tokenizer = None slow_tokenizer = None # sanity check 2: fast and slow tokenizers should be compatible (length) if fast_tokenizer is not None and slow_tokenizer is not None: if len(fast_tokenizer) != len(slow_tokenizer): warning_message = ( f"The fast/slow tokenizers () have different length: " f"len(fast_tokenizer) = {len(fast_tokenizer)} and " f"len(slow_tokenizer) = {len(slow_tokenizer)}." 
) result["warnings"].append(warning_message) if not keep_fast_tokenizer: fast_tokenizer = None slow_tokenizer = None return fast_tokenizer, slow_tokenizer tokenizers = [] feature_extractors = [] for processor in processors: if isinstance(processor, PreTrainedTokenizerBase): if processor.__class__.__name__ not in {x.__class__.__name__ for x in tokenizers}: tokenizers.append(processor) elif isinstance(processor, BaseImageProcessor): if processor.__class__.__name__ not in {x.__class__.__name__ for x in feature_extractors}: feature_extractors.append(processor) elif isinstance(processor, FeatureExtractionMixin): if processor.__class__.__name__ not in {x.__class__.__name__ for x in feature_extractors}: feature_extractors.append(processor) elif isinstance(processor, ProcessorMixin): if hasattr(processor, "tokenizer"): if processor.tokenizer.__class__.__name__ not in {x.__class__.__name__ for x in tokenizers}: tokenizers.append(processor.tokenizer) # Currently, we only have these 2 possibilities if hasattr(processor, "image_processor"): if processor.image_processor.__class__.__name__ not in { x.__class__.__name__ for x in feature_extractors }: feature_extractors.append(processor.image_processor) elif hasattr(processor, "feature_extractor"): if processor.feature_extractor.__class__.__name__ not in { x.__class__.__name__ for x in feature_extractors }: feature_extractors.append(processor.feature_extractor) # check the built processors have the unique type num_types = len({x.__class__.__name__ for x in feature_extractors}) if num_types >= 2: raise ValueError(f"`feature_extractors` should contain at most 1 type, but it contains {num_types} types!") num_types = len({x.__class__.__name__.replace("Fast", "") for x in tokenizers}) if num_types >= 2: raise ValueError(f"`tokenizers` should contain at most 1 tokenizer type, but it contains {num_types} types!") fast_tokenizer = None slow_tokenizer = None for tokenizer in tokenizers: if isinstance(tokenizer, PreTrainedTokenizerFast): fast_tokenizer = tokenizer else: slow_tokenizer = tokenizer # If the (original) fast/slow tokenizers don't correspond, keep only the fast tokenizer. # This doesn't necessarily imply the fast/slow tokenizers in a single Hub repo. has issues. # It's more of an issue in `build_processor` which tries to get a checkpoint with as much effort as possible. # For `YosoModel` (which uses `AlbertTokenizer(Fast)`), its real (Hub) checkpoint doesn't contain valid files to # load the slower tokenizer (`AlbertTokenizer`), and it ends up finding the (canonical) checkpoint of `AlbertModel`, # which has different vocabulary. # TODO: Try to improve `build_processor`'s definition and/or usage to avoid the above situation in the first place. fast_tokenizer, slow_tokenizer = _sanity_check(fast_tokenizer, slow_tokenizer, keep_fast_tokenizer=True) original_fast_tokenizer, original_slow_tokenizer = fast_tokenizer, slow_tokenizer if fast_tokenizer: try: # Wav2Vec2ForCTC , ByT5Tokenizer etc. all are already small enough and have no fast version that can # be retrained if fast_tokenizer.vocab_size > TARGET_VOCAB_SIZE: fast_tokenizer = convert_tokenizer(fast_tokenizer) except Exception: result["warnings"].append( ( f"Failed to convert the fast tokenizer for {fast_tokenizer.__class__.__name__}.", traceback.format_exc(), ) ) # If `fast_tokenizer` exists, `slow_tokenizer` should correspond to it. if fast_tokenizer: # Make sure the fast tokenizer can be saved try: # We don't save it to `output_folder` at this moment - only at the end of this function. 
with tempfile.TemporaryDirectory() as tmpdir: fast_tokenizer.save_pretrained(tmpdir) try: slow_tokenizer = AutoTokenizer.from_pretrained(tmpdir, use_fast=False) except Exception: result["warnings"].append( ( f"Failed to load the slow tokenizer saved from {fast_tokenizer.__class__.__name__}.", traceback.format_exc(), ) ) # Let's just keep the fast version slow_tokenizer = None except Exception: result["warnings"].append( ( f"Failed to save the fast tokenizer for {fast_tokenizer.__class__.__name__}.", traceback.format_exc(), ) ) fast_tokenizer = None # If the (possibly converted) fast/slow tokenizers don't correspond, set them to `None`, and use the original # tokenizers. fast_tokenizer, slow_tokenizer = _sanity_check(fast_tokenizer, slow_tokenizer, keep_fast_tokenizer=False) # If there is any conversion failed, we keep the original tokenizers. if (original_fast_tokenizer is not None and fast_tokenizer is None) or ( original_slow_tokenizer is not None and slow_tokenizer is None ): warning_messagae = ( "There are some issues when converting the fast/slow tokenizers. The original tokenizers from the Hub " " will be used instead." ) result["warnings"].append(warning_messagae) # Let's use the original version at the end (`original_fast_tokenizer` and `original_slow_tokenizer`) fast_tokenizer = original_fast_tokenizer slow_tokenizer = original_slow_tokenizer # Make sure the fast tokenizer can be saved if fast_tokenizer: # We don't save it to `output_folder` at this moment - only at the end of this function. with tempfile.TemporaryDirectory() as tmpdir: try: fast_tokenizer.save_pretrained(tmpdir) except Exception: result["warnings"].append( ( f"Failed to save the fast tokenizer for {fast_tokenizer.__class__.__name__}.", traceback.format_exc(), ) ) fast_tokenizer = None # Make sure the slow tokenizer can be saved if slow_tokenizer: # We don't save it to `output_folder` at this moment - only at the end of this function. with tempfile.TemporaryDirectory() as tmpdir: try: slow_tokenizer.save_pretrained(tmpdir) except Exception: result["warnings"].append( ( f"Failed to save the slow tokenizer for {slow_tokenizer.__class__.__name__}.", traceback.format_exc(), ) ) slow_tokenizer = None # update feature extractors using the tiny config try: feature_extractors = [convert_feature_extractor(p, tiny_config) for p in feature_extractors] except Exception: result["warnings"].append( ( "Failed to convert feature extractors.", traceback.format_exc(), ) ) feature_extractors = [] if hasattr(tiny_config, "max_position_embeddings") and tiny_config.max_position_embeddings > 0: if fast_tokenizer is not None: if fast_tokenizer.__class__.__name__ in [ "RobertaTokenizerFast", "XLMRobertaTokenizerFast", "LongformerTokenizerFast", "MPNetTokenizerFast", ]: fast_tokenizer.model_max_length = tiny_config.max_position_embeddings - 2 else: fast_tokenizer.model_max_length = tiny_config.max_position_embeddings if slow_tokenizer is not None: if slow_tokenizer.__class__.__name__ in [ "RobertaTokenizer", "XLMRobertaTokenizer", "LongformerTokenizer", "MPNetTokenizer", ]: slow_tokenizer.model_max_length = tiny_config.max_position_embeddings - 2 else: slow_tokenizer.model_max_length = tiny_config.max_position_embeddings processors = [fast_tokenizer, slow_tokenizer] + feature_extractors processors = [p for p in processors if p is not None] for p in processors: p.save_pretrained(output_folder) return processors def get_checkpoint_dir(output_dir, model_arch): """Get framework-agnostic architecture name. 
Used to save all PT/TF/Flax models into the same directory.""" arch_name = model_arch.__name__ if arch_name.startswith("TF"): arch_name = arch_name[2:] elif arch_name.startswith("Flax"): arch_name = arch_name[4:] return os.path.join(output_dir, arch_name) def build_model(model_arch, tiny_config, output_dir): """Create and save a model for `model_arch`. Also copy the set of processors to each model (under the same model type) output folder. """ checkpoint_dir = get_checkpoint_dir(output_dir, model_arch) processor_output_dir = os.path.join(output_dir, "processors") # copy the (same set of) processors (for a model type) to the model arch. specific folder if os.path.isdir(processor_output_dir): shutil.copytree(processor_output_dir, checkpoint_dir, dirs_exist_ok=True) tiny_config = copy.deepcopy(tiny_config) if any(model_arch.__name__.endswith(x) for x in ["ForCausalLM", "LMHeadModel"]): tiny_config.is_encoder_decoder = False tiny_config.is_decoder = True model = model_arch(config=tiny_config) model.save_pretrained(checkpoint_dir) model.from_pretrained(checkpoint_dir) return model def fill_result_with_error(result, error, trace, models_to_create): """Fill `result` with errors for all target model arch if we can't build processor""" error = (error, trace) result["error"] = error for framework in FRAMEWORKS: if framework in models_to_create: result[framework] = {} for model_arch in models_to_create[framework]: result[framework][model_arch.__name__] = {"model": None, "checkpoint": None, "error": error} result["processor"] = {p.__class__.__name__: p.__class__.__name__ for p in result["processor"].values()} def upload_model(model_dir, organization, token): """Upload the tiny models""" arch_name = model_dir.split(os.path.sep)[-1] repo_name = f"tiny-random-{arch_name}" repo_id = f"{organization}/{repo_name}" repo_exist = False error = None try: create_repo(repo_id=repo_id, exist_ok=False, repo_type="model", token=token) except Exception as e: error = e if "You already created" in str(e): error = None logger.warning("Remote repository exists and will be cloned.") repo_exist = True try: create_repo(repo_id=repo_id, exist_ok=True, repo_type="model", token=token) except Exception as e: error = e if error is not None: raise error with tempfile.TemporaryDirectory() as tmpdir: repo = Repository(local_dir=tmpdir, clone_from=repo_id, token=token) repo.git_pull() shutil.copytree(model_dir, tmpdir, dirs_exist_ok=True) if repo_exist: # Open a PR on the existing Hub repo. hub_pr_url = upload_folder( folder_path=model_dir, repo_id=repo_id, repo_type="model", commit_message=f"Update tiny models for {arch_name}", commit_description=f"Upload tiny models for {arch_name}", create_pr=True, token=token, ) logger.warning(f"PR open in {hub_pr_url}.") # TODO: We need this information? 
else: # Push to Hub repo directly repo.git_add(auto_lfs_track=True) repo.git_commit(f"Upload tiny models for {arch_name}") repo.git_push(blocking=True) # this prints a progress bar with the upload logger.warning(f"Tiny models {arch_name} pushed to {repo_id}.") def build_composite_models(config_class, output_dir): import tempfile from transformers import ( BertConfig, BertLMHeadModel, BertModel, BertTokenizer, BertTokenizerFast, EncoderDecoderModel, GPT2Config, GPT2LMHeadModel, GPT2Tokenizer, GPT2TokenizerFast, SpeechEncoderDecoderModel, TFEncoderDecoderModel, TFVisionEncoderDecoderModel, TFVisionTextDualEncoderModel, VisionEncoderDecoderModel, VisionTextDualEncoderModel, ViTConfig, ViTFeatureExtractor, ViTModel, Wav2Vec2Config, Wav2Vec2Model, Wav2Vec2Processor, ) # These will be removed at the end if they are empty result = {"error": None, "warnings": []} if config_class.model_type == "encoder-decoder": encoder_config_class = BertConfig decoder_config_class = BertConfig encoder_processor = (BertTokenizerFast, BertTokenizer) decoder_processor = (BertTokenizerFast, BertTokenizer) encoder_class = BertModel decoder_class = BertLMHeadModel model_class = EncoderDecoderModel tf_model_class = TFEncoderDecoderModel elif config_class.model_type == "vision-encoder-decoder": encoder_config_class = ViTConfig decoder_config_class = GPT2Config encoder_processor = (ViTFeatureExtractor,) decoder_processor = (GPT2TokenizerFast, GPT2Tokenizer) encoder_class = ViTModel decoder_class = GPT2LMHeadModel model_class = VisionEncoderDecoderModel tf_model_class = TFVisionEncoderDecoderModel elif config_class.model_type == "speech-encoder-decoder": encoder_config_class = Wav2Vec2Config decoder_config_class = BertConfig encoder_processor = (Wav2Vec2Processor,) decoder_processor = (BertTokenizerFast, BertTokenizer) encoder_class = Wav2Vec2Model decoder_class = BertLMHeadModel model_class = SpeechEncoderDecoderModel tf_model_class = None elif config_class.model_type == "vision-text-dual-encoder": # Not encoder-decoder, but encoder-encoder. We just keep the same name as above to make code easier encoder_config_class = ViTConfig decoder_config_class = BertConfig encoder_processor = (ViTFeatureExtractor,) decoder_processor = (BertTokenizerFast, BertTokenizer) encoder_class = ViTModel decoder_class = BertModel model_class = VisionTextDualEncoderModel tf_model_class = TFVisionTextDualEncoderModel with tempfile.TemporaryDirectory() as tmpdir: try: # build encoder models_to_create = {"processor": encoder_processor, "pytorch": (encoder_class,), "tensorflow": []} encoder_output_dir = os.path.join(tmpdir, "encoder") build(encoder_config_class, models_to_create, encoder_output_dir) # build decoder models_to_create = {"processor": decoder_processor, "pytorch": (decoder_class,), "tensorflow": []} decoder_output_dir = os.path.join(tmpdir, "decoder") build(decoder_config_class, models_to_create, decoder_output_dir) # build encoder-decoder encoder_path = os.path.join(encoder_output_dir, encoder_class.__name__) decoder_path = os.path.join(decoder_output_dir, decoder_class.__name__) if config_class.model_type != "vision-text-dual-encoder": # Specify these explicitly for encoder-decoder like models, but not for `vision-text-dual-encoder` as it # has no decoder. 
decoder_config = decoder_config_class.from_pretrained(decoder_path) decoder_config.is_decoder = True decoder_config.add_cross_attention = True model = model_class.from_encoder_decoder_pretrained( encoder_path, decoder_path, decoder_config=decoder_config, ) elif config_class.model_type == "vision-text-dual-encoder": model = model_class.from_vision_text_pretrained(encoder_path, decoder_path) model_path = os.path.join( output_dir, f"{model_class.__name__}-{encoder_config_class.model_type}-{decoder_config_class.model_type}", ) model.save_pretrained(model_path) if tf_model_class is not None: model = tf_model_class.from_pretrained(model_path) model.save_pretrained(model_path) # copy the processors encoder_processor_path = os.path.join(encoder_output_dir, "processors") decoder_processor_path = os.path.join(decoder_output_dir, "processors") if os.path.isdir(encoder_processor_path): shutil.copytree(encoder_processor_path, model_path, dirs_exist_ok=True) if os.path.isdir(decoder_processor_path): shutil.copytree(decoder_processor_path, model_path, dirs_exist_ok=True) # fill `result` result["processor"] = {x.__name__: x.__name__ for x in encoder_processor + decoder_processor} result["pytorch"] = {model_class.__name__: {"model": model_class.__name__, "checkpoint": model_path}} result["tensorflow"] = {} if tf_model_class is not None: result["tensorflow"] = { tf_model_class.__name__: {"model": tf_model_class.__name__, "checkpoint": model_path} } except Exception: result["error"] = ( f"Failed to build models for {config_class.__name__}.", traceback.format_exc(), ) if not result["error"]: del result["error"] if not result["warnings"]: del result["warnings"] return result def get_token_id_from_tokenizer(token_id_name, tokenizer, original_token_id): """Use `tokenizer` to get the values of `bos_token_id`, `eos_token_ids`, etc. The argument `token_id_name` should be a string ending with `_token_id`, and `original_token_id` should be an integer that will be return if `tokenizer` has no token corresponding to `token_id_name`. """ token_id = original_token_id if not token_id_name.endswith("_token_id"): raise ValueError(f"`token_id_name` is {token_id_name}, which doesn't end with `_token_id`!") token = getattr(tokenizer, token_id_name.replace("_token_id", "_token"), None) if token is not None: if isinstance(tokenizer, PreTrainedTokenizerFast): token_id = tokenizer._convert_token_to_id_with_added_voc(token) else: token_id = tokenizer._convert_token_to_id(token) return token_id def get_config_overrides(config_class, processors): # `Bark` configuration is too special. Let's just not handle this for now. if config_class.__name__ == "BarkConfig": return {} config_overrides = {} # Check if there is any tokenizer (prefer fast version if any) tokenizer = None for processor in processors: if isinstance(processor, PreTrainedTokenizerFast): tokenizer = processor break elif isinstance(processor, PreTrainedTokenizer): tokenizer = processor if tokenizer is None: return config_overrides # Get some properties of the (already converted) tokenizer (smaller vocab size, special token ids, etc.) # We use `len(tokenizer)` instead of `tokenizer.vocab_size` to avoid potential issues for tokenizers with non-empty # `added_tokens_encoder`. One example is the `DebertaV2Tokenizer` where the mask token is the extra token. vocab_size = len(tokenizer) # The original checkpoint has length `35998`, but it doesn't have ids `30400` and `30514` but instead `35998` and # `35999`. 
if config_class.__name__ == "GPTSanJapaneseConfig": vocab_size += 2 config_overrides["vocab_size"] = vocab_size # Used to create a new model tester with `tokenizer.vocab_size` in order to get the (updated) special token ids. model_tester_kwargs = {"vocab_size": vocab_size} # `FSMTModelTester` accepts `src_vocab_size` and `tgt_vocab_size` but not `vocab_size`. if config_class.__name__ == "FSMTConfig": del model_tester_kwargs["vocab_size"] model_tester_kwargs["src_vocab_size"] = tokenizer.src_vocab_size model_tester_kwargs["tgt_vocab_size"] = tokenizer.tgt_vocab_size _tiny_config = get_tiny_config(config_class, **model_tester_kwargs) # handle the possibility of `text_config` inside `_tiny_config` for clip-like models (`owlvit`, `groupvit`, etc.) if hasattr(_tiny_config, "text_config"): _tiny_config = _tiny_config.text_config # Collect values of some special token ids for attr in dir(_tiny_config): if attr.endswith("_token_id"): token_id = getattr(_tiny_config, attr) if token_id is not None: # Using the token id values from `tokenizer` instead of from `_tiny_config`. token_id = get_token_id_from_tokenizer(attr, tokenizer, original_token_id=token_id) config_overrides[attr] = token_id if config_class.__name__ == "FSMTConfig": config_overrides["src_vocab_size"] = tokenizer.src_vocab_size config_overrides["tgt_vocab_size"] = tokenizer.tgt_vocab_size # `FSMTConfig` has `DecoderConfig` as `decoder` attribute. config_overrides["decoder"] = configuration_fsmt.DecoderConfig( vocab_size=tokenizer.tgt_vocab_size, bos_token_id=config_overrides["eos_token_id"] ) return config_overrides def build(config_class, models_to_create, output_dir): """Create all models for a certain model type. Args: config_class (`PretrainedConfig`): A subclass of `PretrainedConfig` that is used to determine `models_to_create`. models_to_create (`dict`): A dictionary containing the processor/model classes that we want to create the instances. These models are of the same model type which is associated to `config_class`. output_dir (`str`): The directory to save all the checkpoints. Each model architecture will be saved in a subdirectory under it. Models in different frameworks with the same architecture will be saved in the same subdirectory. """ if data["training_ds"] is None or data["testing_ds"] is None: ds = load_dataset("Salesforce/wikitext", "wikitext-2-raw-v1") data["training_ds"] = ds["train"] data["testing_ds"] = ds["test"] if config_class.model_type in [ "encoder-decoder", "vision-encoder-decoder", "speech-encoder-decoder", "vision-text-dual-encoder", ]: return build_composite_models(config_class, output_dir) result = {k: {} for k in models_to_create} # These will be removed at the end if they are empty result["error"] = None result["warnings"] = [] # Build processors processor_classes = models_to_create["processor"] if len(processor_classes) == 0: error = f"No processor class could be found in {config_class.__name__}." fill_result_with_error(result, error, None, models_to_create) logger.error(result["error"][0]) return result for processor_class in processor_classes: try: processor = build_processor(config_class, processor_class, allow_no_checkpoint=True) if processor is not None: result["processor"][processor_class] = processor except Exception: error = f"Failed to build processor for {processor_class.__name__}." 
trace = traceback.format_exc() fill_result_with_error(result, error, trace, models_to_create) logger.error(result["error"][0]) return result if len(result["processor"]) == 0: error = f"No processor could be built for {config_class.__name__}." fill_result_with_error(result, error, None, models_to_create) logger.error(result["error"][0]) return result try: tiny_config = get_tiny_config(config_class) except Exception as e: error = f"Failed to get tiny config for {config_class.__name__}: {e}" trace = traceback.format_exc() fill_result_with_error(result, error, trace, models_to_create) logger.error(result["error"][0]) return result # Convert the processors (reduce vocabulary size, smaller image size, etc.) processors = list(result["processor"].values()) processor_output_folder = os.path.join(output_dir, "processors") try: processors = convert_processors(processors, tiny_config, processor_output_folder, result) except Exception: error = "Failed to convert the processors." trace = traceback.format_exc() result["warnings"].append((error, trace)) if len(processors) == 0: error = f"No processor is returned by `convert_processors` for {config_class.__name__}." fill_result_with_error(result, error, None, models_to_create) logger.error(result["error"][0]) return result try: config_overrides = get_config_overrides(config_class, processors) except Exception as e: error = f"Failure occurs while calling `get_config_overrides`: {e}" trace = traceback.format_exc() fill_result_with_error(result, error, trace, models_to_create) logger.error(result["error"][0]) return result # Just for us to see this easily in the report if "vocab_size" in config_overrides: result["vocab_size"] = config_overrides["vocab_size"] # Update attributes that `vocab_size` involves for k, v in config_overrides.items(): if hasattr(tiny_config, k): setattr(tiny_config, k, v) # So far, we only have to deal with `text_config`, as `config_overrides` contains text-related attributes only. # `FuyuConfig` saves data under both FuyuConfig and its `text_config`. This is not good, but let's just update # every involved fields to avoid potential failure. if ( hasattr(tiny_config, "text_config") and tiny_config.text_config is not None and hasattr(tiny_config.text_config, k) ): setattr(tiny_config.text_config, k, v) # If `text_config_dict` exists, we need to update its value here too in order to # make # `save_pretrained -> from_pretrained` work. 
if hasattr(tiny_config, "text_config_dict"): tiny_config.text_config_dict[k] = v if result["warnings"]: logger.warning(result["warnings"][0][0]) # update `result["processor"]` result["processor"] = {type(p).__name__: p.__class__.__name__ for p in processors} for pytorch_arch in models_to_create["pytorch"]: result["pytorch"][pytorch_arch.__name__] = {} error = None try: model = build_model(pytorch_arch, tiny_config, output_dir=output_dir) except Exception as e: model = None error = f"Failed to create the pytorch model for {pytorch_arch}: {e}" trace = traceback.format_exc() result["pytorch"][pytorch_arch.__name__]["model"] = model.__class__.__name__ if model is not None else None result["pytorch"][pytorch_arch.__name__]["checkpoint"] = ( get_checkpoint_dir(output_dir, pytorch_arch) if model is not None else None ) if error is not None: result["pytorch"][pytorch_arch.__name__]["error"] = (error, trace) logger.error(f"{pytorch_arch.__name__}: {error}") for tensorflow_arch in models_to_create["tensorflow"]: # Make PT/TF weights compatible pt_arch_name = tensorflow_arch.__name__[2:] # Remove `TF` pt_arch = getattr(transformers_module, pt_arch_name) result["tensorflow"][tensorflow_arch.__name__] = {} error = None if pt_arch.__name__ in result["pytorch"] and result["pytorch"][pt_arch.__name__]["checkpoint"] is not None: ckpt = get_checkpoint_dir(output_dir, pt_arch) # Use the same weights from PyTorch. try: model = tensorflow_arch.from_pretrained(ckpt) model.save_pretrained(ckpt) except Exception as e: # Conversion may fail. Let's not create a model with different weights to avoid confusion (for now). model = None error = f"Failed to convert the pytorch model to the tensorflow model for {pt_arch}: {e}" trace = traceback.format_exc() else: try: model = build_model(tensorflow_arch, tiny_config, output_dir=output_dir) except Exception as e: model = None error = f"Failed to create the tensorflow model for {tensorflow_arch}: {e}" trace = traceback.format_exc() result["tensorflow"][tensorflow_arch.__name__]["model"] = ( model.__class__.__name__ if model is not None else None ) result["tensorflow"][tensorflow_arch.__name__]["checkpoint"] = ( get_checkpoint_dir(output_dir, tensorflow_arch) if model is not None else None ) if error is not None: result["tensorflow"][tensorflow_arch.__name__]["error"] = (error, trace) logger.error(f"{tensorflow_arch.__name__}: {error}") if not result["error"]: del result["error"] if not result["warnings"]: del result["warnings"] return result def build_tiny_model_summary(results, organization=None, token=None): """Build a summary: a dictionary of the form { model architecture name: { "tokenizer_classes": [...], "processor_classes": [...], "model_classes": [...], } .. 
} """ tiny_model_summary = {} for config_name in results: processors = [key for key, value in results[config_name]["processor"].items()] tokenizer_classes = sorted([x for x in processors if x.endswith("TokenizerFast") or x.endswith("Tokenizer")]) processor_classes = sorted([x for x in processors if x not in tokenizer_classes]) for framework in FRAMEWORKS: if framework not in results[config_name]: continue for arch_name in results[config_name][framework]: model_classes = [arch_name] base_arch_name = arch_name[2:] if arch_name.startswith("TF") else arch_name # tiny model is not created for `arch_name` if results[config_name][framework][arch_name]["model"] is None: model_classes = [] if base_arch_name not in tiny_model_summary: tiny_model_summary[base_arch_name] = {} tiny_model_summary[base_arch_name].update( { "tokenizer_classes": tokenizer_classes, "processor_classes": processor_classes, } ) tiny_model_summary[base_arch_name]["model_classes"] = sorted( tiny_model_summary[base_arch_name].get("model_classes", []) + model_classes ) if organization is not None: repo_name = f"tiny-random-{base_arch_name}" # composite models' checkpoints have more precise repo. names on the Hub. if base_arch_name in COMPOSITE_MODELS: repo_name = f"tiny-random-{COMPOSITE_MODELS[base_arch_name]}" repo_id = f"{organization}/{repo_name}" try: commit_hash = hf_api.repo_info(repo_id, token=token).sha except Exception: # The directory is not created, but processor(s) is/are included in `results`. logger.warning(f"Failed to get information for {repo_id}.\n{traceback.format_exc()}") del tiny_model_summary[base_arch_name] continue tiny_model_summary[base_arch_name]["sha"] = commit_hash return tiny_model_summary def build_failed_report(results, include_warning=True): failed_results = {} for config_name in results: if "error" in results[config_name]: if config_name not in failed_results: failed_results[config_name] = {} failed_results[config_name] = {"error": results[config_name]["error"]} if include_warning and "warnings" in results[config_name]: if config_name not in failed_results: failed_results[config_name] = {} failed_results[config_name]["warnings"] = results[config_name]["warnings"] for framework in FRAMEWORKS: if framework not in results[config_name]: continue for arch_name in results[config_name][framework]: if "error" in results[config_name][framework][arch_name]: if config_name not in failed_results: failed_results[config_name] = {} if framework not in failed_results[config_name]: failed_results[config_name][framework] = {} if arch_name not in failed_results[config_name][framework]: failed_results[config_name][framework][arch_name] = {} error = results[config_name][framework][arch_name]["error"] failed_results[config_name][framework][arch_name]["error"] = error return failed_results def build_simple_report(results): text = "" failed_text = "" for config_name in results: for framework in FRAMEWORKS: if framework not in results[config_name]: continue for arch_name in results[config_name][framework]: if "error" in results[config_name][framework][arch_name]: result = results[config_name][framework][arch_name]["error"] failed_text += f"{arch_name}: {result[0]}\n" else: result = ("OK",) text += f"{arch_name}: {result[0]}\n" return text, failed_text def update_tiny_model_summary_file(report_path): with open(os.path.join(report_path, "tiny_model_summary.json")) as fp: new_data = json.load(fp) with open("tests/utils/tiny_model_summary.json") as fp: data = json.load(fp) for key, value in new_data.items(): if key not in 
data: data[key] = value else: for attr in ["tokenizer_classes", "processor_classes", "model_classes"]: # we might get duplication here. We will remove them below when creating `updated_data`. data[key][attr].extend(value[attr]) new_sha = value.get("sha", None) if new_sha is not None: data[key]["sha"] = new_sha updated_data = {} for key in sorted(data.keys()): updated_data[key] = {} for attr, value in data[key].items(): # deduplication and sort updated_data[key][attr] = sorted(set(value)) if attr != "sha" else value with open(os.path.join(report_path, "updated_tiny_model_summary.json"), "w") as fp: json.dump(updated_data, fp, indent=4, ensure_ascii=False) def create_tiny_models( output_path, all, model_types, models_to_skip, no_check, upload, organization, token, num_workers=1, ): clone_path = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) if os.getcwd() != clone_path: raise ValueError(f"This script should be run from the root of the clone of `transformers` {clone_path}") report_path = os.path.join(output_path, "reports") os.makedirs(report_path, exist_ok=True) _pytorch_arch_mappings = [ x for x in dir(transformers_module) if x.startswith("MODEL_") and x.endswith("_MAPPING") and x != "MODEL_NAMES_MAPPING" ] _tensorflow_arch_mappings = [ x for x in dir(transformers_module) if x.startswith("TF_MODEL_") and x.endswith("_MAPPING") ] pytorch_arch_mappings = [getattr(transformers_module, x) for x in _pytorch_arch_mappings] tensorflow_arch_mappings = [getattr(transformers_module, x) for x in _tensorflow_arch_mappings] config_classes = CONFIG_MAPPING.values() if not all: config_classes = [CONFIG_MAPPING[model_type] for model_type in model_types] # A map from config classes to tuples of processors (tokenizer, feature extractor, processor) classes processor_type_map = {c: get_processor_types_from_config_class(c) for c in config_classes} to_create = {} for c in config_classes: processors = processor_type_map[c] models = get_architectures_from_config_class(c, pytorch_arch_mappings, models_to_skip) tf_models = get_architectures_from_config_class(c, tensorflow_arch_mappings, models_to_skip) if len(models) + len(tf_models) > 0: to_create[c] = {"processor": processors, "pytorch": models, "tensorflow": tf_models} results = {} if num_workers <= 1: for c, models_to_create in list(to_create.items()): print(f"Create models for {c.__name__} ...") result = build(c, models_to_create, output_dir=os.path.join(output_path, c.model_type)) results[c.__name__] = result print("=" * 40) else: all_build_args = [] for c, models_to_create in list(to_create.items()): all_build_args.append((c, models_to_create, os.path.join(output_path, c.model_type))) with multiprocessing.Pool() as pool: results = pool.starmap(build, all_build_args) results = {buid_args[0].__name__: result for buid_args, result in zip(all_build_args, results)} if upload: if organization is None: raise ValueError("The argument `organization` could not be `None`. No model is uploaded") to_upload = [] for model_type in os.listdir(output_path): # This is the directory containing the reports if model_type == "reports": continue for arch in os.listdir(os.path.join(output_path, model_type)): if arch == "processors": continue to_upload.append(os.path.join(output_path, model_type, arch)) to_upload = sorted(to_upload) upload_results = {} if len(to_upload) > 0: for model_dir in to_upload: try: upload_model(model_dir, organization, token) except Exception as e: error = f"Failed to upload {model_dir}. 
{e.__class__.__name__}: {e}" logger.error(error) upload_results[model_dir] = error with open(os.path.join(report_path, "failed_uploads.json"), "w") as fp: json.dump(upload_results, fp, indent=4) # Build the tiny model summary file. The `tokenizer_classes` and `processor_classes` could be both empty lists. # When using the items in this file to update the file `tests/utils/tiny_model_summary.json`, the model # architectures with `tokenizer_classes` and `processor_classes` being both empty should **NOT** be added to # `tests/utils/tiny_model_summary.json`. tiny_model_summary = build_tiny_model_summary(results, organization=organization, token=token) with open(os.path.join(report_path, "tiny_model_summary.json"), "w") as fp: json.dump(tiny_model_summary, fp, indent=4) with open(os.path.join(report_path, "tiny_model_creation_report.json"), "w") as fp: json.dump(results, fp, indent=4) # Build the warning/failure report (json format): same format as the complete `results` except this contains only # warnings or errors. failed_results = build_failed_report(results) with open(os.path.join(report_path, "failed_report.json"), "w") as fp: json.dump(failed_results, fp, indent=4) simple_report, failed_report = build_simple_report(results) # The simplified report: a .txt file with each line of format: # {model architecture name}: {OK or error message} with open(os.path.join(report_path, "simple_report.txt"), "w") as fp: fp.write(simple_report) # The simplified failure report: same above except this only contains line with errors with open(os.path.join(report_path, "simple_failed_report.txt"), "w") as fp: fp.write(failed_report) update_tiny_model_summary_file(report_path=os.path.join(output_path, "reports")) if __name__ == "__main__": # This has to be `spawn` to avoid hanging forever! multiprocessing.set_start_method("spawn") def list_str(values): return values.split(",") parser = argparse.ArgumentParser() parser.add_argument("--all", action="store_true", help="Will create all tiny models.") parser.add_argument( "--no_check", action="store_true", help="If set, will not check the validity of architectures. Use with caution.", ) parser.add_argument( "-m", "--model_types", type=list_str, help="Comma-separated list of model type(s) from which the tiny models will be created.", ) parser.add_argument( "--models_to_skip", type=list_str, help=( "Comma-separated list of model class names(s) from which the tiny models won't be created.\nThis is usually " "the list of model classes that have their tiny versions already uploaded to the Hub." ), ) parser.add_argument("--upload", action="store_true", help="If to upload the created tiny models to the Hub.") parser.add_argument( "--organization", default=None, type=str, help="The organization on the Hub to which the tiny models will be uploaded.", ) parser.add_argument( "--token", default=None, type=str, help="A valid authentication token for HuggingFace Hub with write access." ) parser.add_argument("output_path", type=Path, help="Path indicating where to store generated model.") parser.add_argument("--num_workers", default=1, type=int, help="The number of workers to run.") args = parser.parse_args() if not args.all and not args.model_types: raise ValueError("Please provide at least one model type or pass `--all` to export all architectures.") create_tiny_models( args.output_path, args.all, args.model_types, args.models_to_skip, args.no_check, args.upload, args.organization, args.token, args.num_workers, )
transformers/utils/create_dummy_models.py/0
{ "file_path": "transformers/utils/create_dummy_models.py", "repo_id": "transformers", "token_count": 29298 }
469
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ast import datetime import json import os import sys import time from typing import Dict from get_ci_error_statistics import get_jobs from huggingface_hub import HfApi from notification_service import ( Message, handle_stacktraces, handle_test_results, prepare_reports, retrieve_artifact, retrieve_available_artifacts, ) from slack_sdk import WebClient api = HfApi() client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"]) class QuantizationMessage(Message): def __init__( self, title: str, results: Dict, ): self.title = title # Failures and success of the modeling tests self.n_success = sum(r["success"] for r in results.values()) self.single_gpu_failures = sum(r["failed"]["single"] for r in results.values()) self.multi_gpu_failures = sum(r["failed"]["multi"] for r in results.values()) self.n_failures = self.single_gpu_failures + self.multi_gpu_failures self.n_tests = self.n_failures + self.n_success self.results = results self.thread_ts = None @property def payload(self) -> str: blocks = [self.header] if self.n_failures > 0: blocks.append(self.failures_overwiew) blocks.append(self.failures_detailed) if self.n_failures == 0: blocks.append(self.no_failures) return json.dumps(blocks) @property def time(self) -> str: all_results = self.results.values() time_spent = [] for r in all_results: if len(r["time_spent"]): time_spent.extend([x for x in r["time_spent"].split(", ") if len(x.strip())]) total_secs = 0 for time in time_spent: time_parts = time.split(":") # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(time_parts) == 1: time_parts = [0, 0, time_parts[0]] hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2]) total_secs += hours * 3600 + minutes * 60 + seconds hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60 return f"{int(hours)}h{int(minutes)}m{int(seconds)}s" @property def failures_overwiew(self) -> Dict: return { "type": "section", "text": { "type": "plain_text", "text": ( f"There were {self.n_failures} failures, out of {self.n_tests} tests.\n" f"The suite ran in {self.time}." 
), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def failures_detailed(self) -> Dict: failures = {k: v["failed"] for k, v in self.results.items()} individual_reports = [] for key, value in failures.items(): device_report = self.get_device_report(value) if sum(value.values()): report = f"{device_report}{key}" individual_reports.append(report) header = "Single | Multi | Category\n" failures_report = prepare_reports( title="The following quantization tests had failures", header=header, reports=individual_reports ) return {"type": "section", "text": {"type": "mrkdwn", "text": failures_report}} def post(self): payload = self.payload print("Sending the following payload") print(json.dumps({"blocks": json.loads(payload)})) text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed." self.thread_ts = client.chat_postMessage( channel=SLACK_REPORT_CHANNEL_ID, blocks=payload, text=text, ) def post_reply(self): if self.thread_ts is None: raise ValueError("Can only post reply if a post has been made.") for job, job_result in self.results.items(): if len(job_result["failures"]): for device, failures in job_result["failures"].items(): blocks = self.get_reply_blocks( job, job_result, failures, device, text=f'Number of failures: {job_result["failed"][device]}', ) print("Sending the following reply") print(json.dumps({"blocks": blocks})) client.chat_postMessage( channel="#transformers-ci-daily-quantization", text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts["ts"], ) time.sleep(1) if __name__ == "__main__": setup_status = os.environ.get("SETUP_STATUS") SLACK_REPORT_CHANNEL_ID = os.environ["SLACK_REPORT_CHANNEL"] setup_failed = True if setup_status is not None and setup_status != "success" else False # This env. variable is set in workflow file (under the job `send_results`). ci_event = os.environ["CI_EVENT"] title = f"🤗 Results of the {ci_event} - {os.getenv('CI_TEST_JOB')}." if setup_failed: Message.error_out( title, ci_title="", runner_not_available=False, runner_failed=False, setup_failed=setup_failed ) exit(0) arguments = sys.argv[1:][0] try: quantization_matrix = ast.literal_eval(arguments) # Need to change from elements like `quantization/bnb` to `quantization_bnb` (the ones used as artifact names). 
quantization_matrix = [x.replace("quantization/", "quantization_") for x in quantization_matrix] except SyntaxError: Message.error_out(title, ci_title="") raise ValueError("Errored out.") available_artifacts = retrieve_available_artifacts() quantization_results = { quant: { "failed": {"single": 0, "multi": 0}, "success": 0, "time_spent": "", "failures": {}, "job_link": {}, } for quant in quantization_matrix if f"run_quantization_torch_gpu_{ quant }_test_reports" in available_artifacts } github_actions_jobs = get_jobs( workflow_run_id=os.environ["GITHUB_RUN_ID"], token=os.environ["ACCESS_REPO_INFO_TOKEN"] ) github_actions_job_links = {job["name"]: job["html_url"] for job in github_actions_jobs} artifact_name_to_job_map = {} for job in github_actions_jobs: for step in job["steps"]: if step["name"].startswith("Test suite reports artifacts: "): artifact_name = step["name"][len("Test suite reports artifacts: ") :] artifact_name_to_job_map[artifact_name] = job break for quant in quantization_results.keys(): for artifact_path in available_artifacts[f"run_quantization_torch_gpu_{ quant }_test_reports"].paths: artifact = retrieve_artifact(artifact_path["path"], artifact_path["gpu"]) if "stats" in artifact: # Link to the GitHub Action job job = artifact_name_to_job_map[artifact_path["path"]] quantization_results[quant]["job_link"][artifact_path["gpu"]] = job["html_url"] failed, success, time_spent = handle_test_results(artifact["stats"]) quantization_results[quant]["failed"][artifact_path["gpu"]] += failed quantization_results[quant]["success"] += success quantization_results[quant]["time_spent"] += time_spent[1:-1] + ", " stacktraces = handle_stacktraces(artifact["failures_line"]) for line in artifact["summary_short"].split("\n"): if line.startswith("FAILED "): line = line[len("FAILED ") :] line = line.split()[0].replace("\n", "") if artifact_path["gpu"] not in quantization_results[quant]["failures"]: quantization_results[quant]["failures"][artifact_path["gpu"]] = [] quantization_results[quant]["failures"][artifact_path["gpu"]].append( {"line": line, "trace": stacktraces.pop(0)} ) job_name = os.getenv("CI_TEST_JOB") if not os.path.isdir(os.path.join(os.getcwd(), f"ci_results_{job_name}")): os.makedirs(os.path.join(os.getcwd(), f"ci_results_{job_name}")) with open(f"ci_results_{job_name}/quantization_results.json", "w", encoding="UTF-8") as fp: json.dump(quantization_results, fp, indent=4, ensure_ascii=False) target_workflow = "huggingface/transformers/.github/workflows/self-scheduled-caller.yml@refs/heads/main" is_scheduled_ci_run = os.environ.get("CI_WORKFLOW_REF") == target_workflow # upload results to Hub dataset (only for the scheduled daily CI run on `main`) if is_scheduled_ci_run: api.upload_file( path_or_fileobj=f"ci_results_{job_name}/quantization_results.json", path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_{job_name}/quantization_results.json", repo_id="hf-internal-testing/transformers_daily_ci", repo_type="dataset", token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None), ) message = QuantizationMessage( title, results=quantization_results, ) message.post() message.post_reply()
transformers/utils/notification_service_quantization.py/0
{ "file_path": "transformers/utils/notification_service_quantization.py", "repo_id": "transformers", "token_count": 4809 }
470
import numpy as np

from transformers import Pipeline


def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
transformers/utils/test_module/custom_pipeline.py/0
{ "file_path": "transformers/utils/test_module/custom_pipeline.py", "repo_id": "transformers", "token_count": 453 }
471
# pip install openrlbenchmark==0.2.1a5
# see https://github.com/openrlbenchmark/openrlbenchmark#get-started for documentation
BASELINE_PR_TAG=v0.4.7-55-g110e672
BASELINE_PR_NAME=PR-662

python -m openrlbenchmark.rlops_multi_metrics \
    --filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.reward_model&cen=trl_ppo_trainer_config.value.exp_name&metrics=env/reward_mean&metrics=objective/kl' \
        "sentiment_tuning?tag=$BASELINE_PR_TAG&cl=sentiment lvwerra/gpt2-imdb ($BASELINE_PR_NAME)" \
    --env-ids sentiment-analysis:lvwerra/distilbert-imdb \
    --no-check-empty-runs \
    --pc.ncols 2 \
    --pc.ncols-legend 1 \
    --output-filename benchmark/trl/$BASELINE_PR_TAG/sentiment \
    --scan-history

python -m openrlbenchmark.rlops_multi_metrics \
    --filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.reward_model&cen=trl_ppo_trainer_config.value.exp_name&metrics=env/reward_mean&metrics=objective/kl' \
        "sentiment_tuning?tag=$BASELINE_PR_TAG&cl=sentiment lvwerra/gpt2-imdb ($BASELINE_PR_NAME)" \
        "sentiment_tuning_step_grad_accu?tag=$BASELINE_PR_TAG&cl=sentiment lvwerra/gpt2-imdb gradient accumulation ($BASELINE_PR_NAME)" \
    --env-ids sentiment-analysis:lvwerra/distilbert-imdb \
    --no-check-empty-runs \
    --pc.ncols 2 \
    --pc.ncols-legend 1 \
    --output-filename benchmark/trl/$BASELINE_PR_TAG/gradient_accu \
    --scan-history

python -m openrlbenchmark.rlops_multi_metrics \
    --filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.reward_model&cen=trl_ppo_trainer_config.value.exp_name&metrics=env/reward_mean&metrics=objective/kl' \
        "sentiment_tuning?tag=$BASELINE_PR_TAG&cl=sentiment lvwerra/gpt2-imdb ($BASELINE_PR_NAME)" \
        "sentiment_tuning_gpt2?tag=$BASELINE_PR_TAG&cl=sentiment gpt2 ($BASELINE_PR_NAME)" \
        "sentiment_tuning_falcon_rw_1b?tag=$BASELINE_PR_TAG&cl=sentiment tiiuae/falcon-rw-1b ($BASELINE_PR_NAME)" \
        "sentiment_tuning_gpt2xl_grad_accu?tag=$BASELINE_PR_TAG&cl=sentiment gpt2xl ($BASELINE_PR_NAME)" \
    --env-ids sentiment-analysis:lvwerra/distilbert-imdb \
    --no-check-empty-runs \
    --pc.ncols 2 \
    --pc.ncols-legend 1 \
    --output-filename benchmark/trl/$BASELINE_PR_TAG/different_models \
    --scan-history

python -m openrlbenchmark.rlops_multi_metrics \
    --filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.reward_model&cen=trl_ppo_trainer_config.value.exp_name&metrics=env/reward_mean&metrics=objective/kl' \
        "sentiment_tuning?tag=$BASELINE_PR_TAG&cl=sentiment lvwerra/gpt2-imdb ($BASELINE_PR_NAME)" \
        "sentiment_tuning_peft?tag=$BASELINE_PR_TAG&cl=sentiment lvwerra/gpt2-imdb w/ peft ($BASELINE_PR_NAME)" \
    --env-ids sentiment-analysis:lvwerra/distilbert-imdb \
    --no-check-empty-runs \
    --pc.ncols 2 \
    --pc.ncols-legend 1 \
    --output-filename benchmark/trl/$BASELINE_PR_TAG/peft \
    --scan-history

python benchmark/upload_benchmark.py \
    --folder_path="benchmark/trl/$BASELINE_PR_TAG" \
    --path_in_repo="images/benchmark/$BASELINE_PR_TAG" \
    --repo_id="trl-internal-testing/example-images" \
    --repo_type="dataset"
trl/benchmark/plot.sh/0
{ "file_path": "trl/benchmark/plot.sh", "repo_id": "trl", "token_count": 1454 }
472
# CPO Trainer

Contrastive Preference Optimization (CPO) was introduced in the paper [Contrastive Preference Optimization: Pushing the Boundaries of LLM Performance in Machine Translation](https://huggingface.co/papers/2401.08417) by Haoran Xu, Amr Sharaf, Yunmo Chen, Weiting Tan, Lingfeng Shen, Benjamin Van Durme, Kenton Murray, and Young Jin Kim. At a high level, CPO trains models to avoid generating adequate but not perfect translations in Machine Translation (MT) tasks. However, CPO is a general approximation to the DPO loss and can be applied to other domains like chat.

CPO aims to mitigate two fundamental shortcomings of SFT. First, SFT’s methodology of minimizing the discrepancy between predicted outputs and gold-standard references inherently caps model performance at the quality level of the training data. Secondly, SFT lacks a mechanism to prevent the model from rejecting mistakes in translations. The CPO objective is derived from the DPO objective.

## SimPO

The [SimPO](https://huggingface.co/papers/2405.14734) method is also implemented in the `CPOTrainer`. SimPO is an alternative loss that adds a reward margin, allows for length normalization, and does not use BC regularization. To use this loss, simply set `loss_type="simpo"` and `cpo_alpha=0` in the `CPOConfig`.

## CPO-SimPO

We also offer the combined use of CPO and SimPO, which enables more stable training and improved performance. Learn more details at [CPO-SimPO GitHub](https://github.com/fe1ixxu/CPO_SIMPO). To use this method, simply enable SimPO by setting `loss_type="simpo"` and a non-zero `cpo_alpha` in the `CPOConfig`.

## Expected dataset format

The CPO trainer expects a format identical to the DPO trainer, which should include three entries. These entries should be named as follows:

- `prompt`
- `chosen`
- `rejected`

for example:

```py
cpo_dataset_dict = {
    "prompt": [
        "hello",
        "how are you",
        "What is your name?",
        "What is your name?",
        "Which is the best programming language?",
        "Which is the best programming language?",
        "Which is the best programming language?",
    ],
    "chosen": [
        "hi nice to meet you",
        "I am fine",
        "My name is Mary",
        "My name is Mary",
        "Python",
        "Python",
        "Java",
    ],
    "rejected": [
        "leave me alone",
        "I am not fine",
        "Whats it to you?",
        "I dont have a name",
        "Javascript",
        "C++",
        "C++",
    ],
}
```

where the `prompt` contains the context inputs, `chosen` contains the corresponding chosen responses and `rejected` contains the corresponding negative (rejected) responses. As can be seen, a prompt can have multiple responses, and this is reflected in the entries being repeated in the dictionary's value arrays.

## Expected model format

The CPO trainer expects a model of `AutoModelForCausalLM`, compared to PPO that expects `AutoModelForCausalLMWithValueHead` for the value function.

## Using the `CPOTrainer`

For a detailed example have a look at the `examples/scripts/cpo.py` script. At a high level we need to initialize the `CPOTrainer` with a `model` we wish to train. **Note that CPOTrainer eliminates the need to use the reference model, simplifying the optimization process.** The `beta` refers to the hyperparameter of the implicit reward, and the dataset contains the 3 entries listed above.
```py
cpo_config = CPOConfig(
    beta=0.1,
)

cpo_trainer = CPOTrainer(
    model,
    args=cpo_config,
    train_dataset=train_dataset,
    tokenizer=tokenizer,
)
```

After this, one can then call:

```py
cpo_trainer.train()
```

## Loss functions

Given the preference data, the `CPOTrainer` uses the sigmoid loss on the normalized likelihood via the `logsigmoid` to fit a logistic regression.

The [RSO](https://huggingface.co/papers/2309.06657) authors propose to use a hinge loss on the normalized likelihood from the [SLiC](https://huggingface.co/papers/2305.10425) paper. The `CPOTrainer` can be switched to this loss via the `loss_type="hinge"` argument, and the `beta` in this case is the reciprocal of the margin.

The [IPO](https://huggingface.co/papers/2310.12036) authors provide a deeper theoretical understanding of the CPO algorithms, identify an issue with overfitting, and propose an alternative loss which can be used via the `loss_type="ipo"` argument to the trainer. Note that the `beta` parameter is the reciprocal of the gap between the log-likelihood ratios of the chosen vs the rejected completion pair, and thus the smaller the `beta` the larger this gap is. As per the paper, the loss is averaged over the log-likelihoods of the completion (unlike CPO, which only sums them).

### For Mixture of Experts Models: Enabling the auxiliary loss

MOEs are the most efficient if the load is about equally distributed between experts. To ensure that we train MOEs similarly during preference-tuning, it is beneficial to add the auxiliary loss from the load balancer to the final loss.

This option is enabled by setting `output_router_logits=True` in the model config (e.g. `MixtralConfig`). To scale how much the auxiliary loss contributes to the total loss, use the hyperparameter `router_aux_loss_coef=...` (default: 0.001).

## Logging

While training and evaluating we record the following reward metrics:

* `rewards/chosen`: the mean log probabilities of the policy model for the chosen responses scaled by beta
* `rewards/rejected`: the mean log probabilities of the policy model for the rejected responses scaled by beta
* `rewards/accuracies`: the mean of how often the chosen rewards are greater than the corresponding rejected rewards
* `rewards/margins`: the mean difference between the chosen and corresponding rejected rewards
* `nll_loss`: the mean negative log likelihood loss of the policy model for the chosen responses

## CPOTrainer

[[autodoc]] CPOTrainer

## CPOConfig

[[autodoc]] CPOConfig
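To make the loss options above concrete, here is a short, illustrative sketch of how the different `loss_type` settings can be selected through `CPOConfig`. The numeric values are placeholders rather than tuned recommendations:

```python
from trl import CPOConfig

# Default CPO with the sigmoid loss
sigmoid_config = CPOConfig(output_dir="cpo-sigmoid", beta=0.1)

# SimPO: enable the SimPO loss and turn off the BC regularizer
simpo_config = CPOConfig(output_dir="simpo", loss_type="simpo", cpo_alpha=0.0, beta=2.0)

# CPO-SimPO: SimPO loss combined with a non-zero cpo_alpha for more stable training
cpo_simpo_config = CPOConfig(output_dir="cpo-simpo", loss_type="simpo", cpo_alpha=0.5, beta=2.0)

# Hinge loss, where beta is the reciprocal of the margin
hinge_config = CPOConfig(output_dir="cpo-hinge", loss_type="hinge", beta=0.1)

# IPO loss, averaged over the log-likelihoods of the completion
ipo_config = CPOConfig(output_dir="cpo-ipo", loss_type="ipo", beta=0.1)
```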
trl/docs/source/cpo_trainer.mdx/0
{ "file_path": "trl/docs/source/cpo_trainer.mdx", "repo_id": "trl", "token_count": 1761 }
473
# Multi Adapter RL (MARL) - a single base model for everything

Here we present an approach that uses a single base model for the entire PPO algorithm - which includes retrieving the reference logits, computing the active logits and the rewards. This feature is experimental, as we did not test the convergence of the approach. We encourage the community to let us know if they face any issues.

## Requirements

You just need to install `peft`, and optionally `bitsandbytes` as well if you want to use 8-bit base models for more memory-efficient fine-tuning.

## Summary

The approach consists of three stages, summarized as follows:

1- Train a base model on the target domain (e.g. the `imdb` dataset) - this is the Supervised Fine-Tuning stage - it can leverage the `SFTTrainer` from TRL.
2- Train a reward model using `peft`. This is required in order to re-use the adapter during the RL optimisation process (step 3 below). We show an example of leveraging the `RewardTrainer` from TRL in [this example](https://github.com/huggingface/trl/tree/main/examples/scripts/reward_modeling.py).
3- Fine-tune new adapters on the base model using PPO and the reward adapter ("0 abstraction RL").

Make sure to use the same model (i.e. same architecture and same weights) for stages 2 & 3.

## Quickstart

Let us assume you have trained your reward adapter on the `llama-7b` model using `RewardTrainer` and pushed the weights on the Hub under `trl-lib/llama-7b-hh-rm-adapter`. When doing PPO, before passing the model to `PPOTrainer`, create your model as follows:

```python
model_name = "huggyllama/llama-7b"
rm_adapter_id = "trl-lib/llama-7b-hh-rm-adapter"

# PPO adapter
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)

model = AutoModelForCausalLMWithValueHead.from_pretrained(
    model_name,
    peft_config=lora_config,
    reward_adapter=rm_adapter_id,
)

...
trainer = PPOTrainer(
    model=model,
    ...
)
...
```

Then inside your PPO training loop, call the `compute_reward_score` method by accessing the `model` attribute from `PPOTrainer`.

```python
rewards = trainer.model.compute_reward_score(**inputs)
```

## Advanced usage

### Control on the adapter name

If you are familiar with the `peft` library, you know that you can use multiple adapters inside the same model. What you can do is train multiple adapters on the same base model to fine-tune on different policies. In this case, you want to control which adapter to activate again after retrieving the reward. For that, simply pass the appropriate adapter name to the `ppo_adapter_name` argument when calling `compute_reward_score`.

```python
adapter_name_policy_1 = "policy_1"
rewards = trainer.model.compute_reward_score(**inputs, ppo_adapter_name=adapter_name_policy_1)
...
```

### Using 4-bit and 8-bit base models

For more memory-efficient fine-tuning, you can load your base model in 8-bit or 4-bit while keeping the adapters in the default precision (float32). Just pass the appropriate arguments (i.e.
`load_in_8bit=True` or `load_in_4bit=True`) to `AutoModelForCausalLMWithValueHead.from_pretrained` as follows (assuming you have installed `bitsandbytes`):

```python
model_name = "llama-7b"
rm_adapter_id = "trl-lib/llama-7b-hh-rm-adapter"

# PPO adapter
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)

model = AutoModelForCausalLMWithValueHead.from_pretrained(
    model_name,
    peft_config=lora_config,
    reward_adapter=rm_adapter_id,
    load_in_8bit=True,
)

...
trainer = PPOTrainer(
    model=model,
    ...
)
...
```
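Putting the pieces together, here is a minimal, hypothetical sketch of a PPO loop that scores generations with the shared reward adapter. It assumes the `model` and `tokenizer` from the quickstart above, a `dataset` whose batches expose `input_ids` and a decoded `query` field, and a reward head whose raw scores can be reduced to one scalar per sample; all of these are placeholders rather than a prescribed recipe:

```python
from trl import PPOConfig, PPOTrainer

ppo_config = PPOConfig(batch_size=8, mini_batch_size=2)
ppo_trainer = PPOTrainer(config=ppo_config, model=model, tokenizer=tokenizer, dataset=dataset)

for batch in ppo_trainer.dataloader:
    query_tensors = batch["input_ids"]

    # Generate responses with the active (policy) adapter
    response_tensors = ppo_trainer.generate(query_tensors, return_prompt=False, max_new_tokens=32)
    batch["response"] = tokenizer.batch_decode(response_tensors)

    # Score query + response with the reward adapter that lives on the same base model
    texts = [q + r for q, r in zip(batch["query"], batch["response"])]
    inputs = tokenizer(texts, padding=True, return_tensors="pt").to(ppo_trainer.accelerator.device)
    raw_scores = ppo_trainer.model.compute_reward_score(**inputs)

    # Reduce the raw scores to one scalar reward per sample; the exact reduction depends
    # on the reward model head, the last-token score is used here only for illustration
    rewards = [score[-1] for score in raw_scores]

    # Run one PPO optimization step and log the stats
    stats = ppo_trainer.step(query_tensors, response_tensors, rewards)
    ppo_trainer.log_stats(stats, batch, rewards)
```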
trl/docs/source/multi_adapter_rl.mdx/0
{ "file_path": "trl/docs/source/multi_adapter_rl.mdx", "repo_id": "trl", "token_count": 1207 }
474
# RLHF pipeline for the creation of StackLLaMa: a Stack Exchange llama-7b model.

There were three main steps to the training process:

1. Supervised fine-tuning of the base llama-7b model to create llama-7b-se:
    - `torchrun --nnodes 1 --nproc_per_node 8 examples/research_projects/stack_llama/scripts/supervised_finetuning.py --model_path=<LLAMA_MODEL_PATH> --streaming --learning_rate 1e-5 --max_steps 5000 --output_dir ./llama-se`
2. Reward modeling using dialog pairs from the SE dataset with llama-7b-se to create llama-7b-se-rm:
    - `torchrun --nnodes 1 --nproc_per_node 8 examples/research_projects/stack_llama/scripts/reward_modeling.py --model_name=<LLAMA_SE_MODEL>`
3. RL fine-tuning of llama-7b-se with the llama-7b-se-rm reward model:
    - `accelerate launch --multi_gpu --num_machines 1 --num_processes 8 examples/research_projects/stack_llama/scripts/rl_training.py --log_with=wandb --model_name=<LLAMA_SE_MODEL> --reward_model_name=<LLAMA_SE_RM_MODEL> --adafactor=False --tokenizer_name=<LLAMA_TOKENIZER> --save_freq=100 --output_max_length=128 --batch_size=8 --gradient_accumulation_steps=8 --batched_gen=True --ppo_epochs=4 --seed=0 --learning_rate=1.4e-5 --early_stopping=True --output_dir=llama-se-rl-finetune-128-8-8-1.4e-5_adam`

LoRA layers were used at all stages to reduce memory requirements. At each stage, the peft adapter layers were merged with the base model, using:

```shell
python examples/research_projects/stack_llama/scripts/merge_peft_adapter.py --adapter_model_name=XXX --base_model_name=YYY --output_name=ZZZ
```

Note that this script requires `peft>=0.3.0`.

For access to the base llama-7b model, please see Meta's [release](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) and [request form](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform).
trl/examples/research_projects/stack_llama/scripts/README.md/0
{ "file_path": "trl/examples/research_projects/stack_llama/scripts/README.md", "repo_id": "trl", "token_count": 696 }
475
""" Run the BCO training script with the commands below. In general, the optimal configuration for BCO will be similar to that of KTO. # Full training: python examples/scripts/bco.py \ --model_name_or_path=nnheui/stablelm-2-1_6b-sft-full \ --per_device_train_batch_size 16 \ --per_device_eval_batch_size 32 \ --num_train_epochs 1 \ --learning_rate 1e-6 \ --gradient_checkpointing \ --gradient_accumulation_steps 1 \ --logging_steps 0.01 \ --eval_steps 0.2 \ --save_strategy no \ --output_dir=bco-aligned-model \ --logging_first_step \ --max_length 2048 \ --max_prompt_length 1536 \ --max_completion_length 1024 \ --no_remove_unused_columns \ --warmup_ratio 0.1 \ --bf16 \ --report_to wandb # QLoRA: python examples/scripts/bco.py \ --model_name_or_path=nnheui/stablelm-2-1_6b-sft-full \ --per_device_train_batch_size 16 \ --per_device_eval_batch_size 32 \ --num_train_epochs 1 \ --learning_rate 1e-6 \ --gradient_checkpointing \ --gradient_accumulation_steps 1 \ --logging_steps 0.01 \ --eval_steps 0.2 \ --save_strategy no \ --output_dir=bco-aligned-model-lora \ --logging_first_step \ --warmup_ratio 0.1 \ --report_to wandb \ --max_length 2048 \ --max_prompt_length 1536 \ --max_completion_length 1024 \ --no_remove_unused_columns \ --warmup_ratio 0.1 \ --bf16 \ --use_peft \ --load_in_4bit \ --lora_target_modules=all-linear \ --lora_r=16 \ --lora_alpha=16 """ import logging from dataclasses import dataclass from functools import partial from typing import Literal, Optional import torch import torch.nn.functional as F from accelerate import Accelerator, PartialState from datasets import Dataset, load_dataset from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, PreTrainedModel from trl import BCOConfig, BCOTrainer, ModelConfig, get_peft_config, setup_chat_format # Define and parse arguments. @dataclass class ScriptArguments: """ The arguments for the BCO training script. """ llm_name: Literal["gpt-3.5-turbo", "llama-2-7b-chat", "llama-2-70b-chat"] = "gpt-3.5-turbo" def build_helpfulness_dataset(llm_name: str, num_proc: Optional[int] = None) -> Dataset: """ Filter `llm_name` completions and binarize given their helpfulness score. If helpfulness score is 5, it is desirable. Otherwise, it is undesirable. 
""" def get_model_rating(example, metric: str, llm_name: str): try: model_index = example["models"].index(llm_name) return {metric: int(example["completions"][model_index]["annotations"][metric]["Rating"])} except ValueError as e: logging.warning(e) return -1 def get_model_response(example, llm_name: str): try: model_index = example["models"].index(llm_name) return {"response": example["completions"][model_index]["response"]} except ValueError as e: logging.warning(e) return -1 dataset = load_dataset("openbmb/UltraFeedback")["train"] ds = dataset.filter(lambda example: llm_name in example["models"], batched=False, num_proc=num_proc) ds = ds.filter( lambda example: len(example["models"]) == len(example["completions"]), batched=False, num_proc=num_proc ) METRIC = "helpfulness" ds = ds.map( get_model_rating, batched=False, fn_kwargs={"metric": METRIC, "llm_name": llm_name}, num_proc=num_proc, ) ds = ds.map( get_model_response, batched=False, fn_kwargs={"llm_name": llm_name}, num_proc=num_proc, ) ds = ds.select_columns(["source", "instruction", "response", "helpfulness"]) ds = ds.rename_columns({"instruction": "prompt", "response": "completion"}) ds = ds.map(lambda example: {"label": example["helpfulness"] >= 5}, batched=False, num_proc=num_proc) ds = ds.map( lambda example: {"prompt": [{"role": "user", "content": example["prompt"]}]}, batched=False, num_proc=num_proc, ) dataset = ds.train_test_split(test_size=0.05, seed=42) return dataset def embed_prompt(input_ids: torch.LongTensor, attention_mask: torch.LongTensor, model: PreTrainedModel): """ Borrowed from https://huggingface.co/nomic-ai/nomic-embed-text-v1.5#transformers """ def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) with torch.no_grad(): model_output = model(input_ids=input_ids, attention_mask=attention_mask) embeddings = mean_pooling(model_output, attention_mask) matryoshka_dim = 512 # normalize embeddings embeddings = F.normalize(embeddings, p=2, dim=1) embeddings = F.layer_norm(embeddings, normalized_shape=(embeddings.shape[1],)) embeddings = embeddings[:, :matryoshka_dim] return embeddings if __name__ == "__main__": parser = HfArgumentParser((ScriptArguments, BCOConfig, ModelConfig)) script_args, bco_args, model_args = parser.parse_args_into_dataclasses() bco_args.gradient_checkpointing_kwargs = {"use_reentrant": True} # Load a pretrained model model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code ) ref_model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code ) tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code ) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token # If we are aligning a base model, we use ChatML as the default template if tokenizer.chat_template is None: model, tokenizer = setup_chat_format(model, tokenizer) # Apply chat template def format_dataset(example): example["prompt"] = tokenizer.apply_chat_template( example["prompt"], tokenize=False, add_generation_prompt=True ) return example # Compute that only on the main process for faster data processing. 
# see: https://github.com/huggingface/trl/pull/1255 with PartialState().local_main_process_first(): # Load the dataset dataset = build_helpfulness_dataset(script_args.llm_name, num_proc=bco_args.dataset_num_proc) formatted_dataset = dataset.map(format_dataset, batched=False, num_proc=bco_args.dataset_num_proc) accelerator = Accelerator() embedding_model = AutoModel.from_pretrained( "nomic-ai/nomic-embed-text-v1.5", trust_remote_code=model_args.trust_remote_code, safe_serialization=True, torch_dtype=torch.bfloat16, device_map="auto", ) embedding_model = accelerator.prepare_model(embedding_model) embedding_tokenizer = AutoTokenizer.from_pretrained( "bert-base-uncased", trust_remote_code=model_args.trust_remote_code ) embedding_func = partial( embed_prompt, model=embedding_model, ) # Initialize the BCO trainer bco_trainer = BCOTrainer( model, ref_model, args=bco_args, train_dataset=formatted_dataset["train"], eval_dataset=formatted_dataset["test"], tokenizer=tokenizer, peft_config=get_peft_config(model_args), embedding_func=embedding_func, embedding_tokenizer=embedding_tokenizer, ) # Train and push the model to the Hub bco_trainer.train() bco_trainer.save_model(bco_args.output_dir)
trl/examples/scripts/bco.py/0
{ "file_path": "trl/examples/scripts/bco.py", "repo_id": "trl", "token_count": 3288 }
476
# Copyright 2023 metric-space, The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import torch from parameterized import parameterized from trl import is_diffusers_available, is_peft_available from .testing_utils import require_diffusers if is_diffusers_available() and is_peft_available(): from trl import AlignPropConfig, AlignPropTrainer, DefaultDDPOStableDiffusionPipeline def scorer_function(images, prompts, metadata): return torch.randn(1) * 3.0, {} def prompt_function(): return ("cabbages", {}) @require_diffusers class AlignPropTrainerTester(unittest.TestCase): """ Test the AlignPropTrainer class. """ def setUp(self): alignprop_config = AlignPropConfig( num_epochs=2, train_gradient_accumulation_steps=1, train_batch_size=2, truncated_backprop_rand=False, mixed_precision=None, save_freq=1000000, ) pretrained_model = "hf-internal-testing/tiny-stable-diffusion-torch" pretrained_revision = "main" pipeline_with_lora = DefaultDDPOStableDiffusionPipeline( pretrained_model, pretrained_model_revision=pretrained_revision, use_lora=True ) pipeline_without_lora = DefaultDDPOStableDiffusionPipeline( pretrained_model, pretrained_model_revision=pretrained_revision, use_lora=False ) self.trainer_with_lora = AlignPropTrainer( alignprop_config, scorer_function, prompt_function, pipeline_with_lora ) self.trainer_without_lora = AlignPropTrainer( alignprop_config, scorer_function, prompt_function, pipeline_without_lora ) def tearDown(self) -> None: gc.collect() @parameterized.expand([True, False]) def test_generate_samples(self, use_lora): trainer = self.trainer_with_lora if use_lora else self.trainer_without_lora output_pairs = trainer._generate_samples(2, with_grad=True) assert len(output_pairs.keys()) == 3 assert len(output_pairs["images"]) == 2 @parameterized.expand([True, False]) def test_calculate_loss(self, use_lora): trainer = self.trainer_with_lora if use_lora else self.trainer_without_lora sample = trainer._generate_samples(2) images = sample["images"] prompts = sample["prompts"] assert images.shape == (2, 3, 128, 128) assert len(prompts) == 2 rewards = trainer.compute_rewards(sample) loss = trainer.calculate_loss(rewards) assert torch.isfinite(loss.cpu())
trl/tests/test_alignprop_trainer.py/0
{ "file_path": "trl/tests/test_alignprop_trainer.py", "repo_id": "trl", "token_count": 1225 }
477
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest from functools import partial from unittest.mock import patch import pytest import torch from transformers import AutoModelForCausalLM, AutoTokenizer from .testing_utils import is_peft_available, require_peft class DummyDataset(torch.utils.data.Dataset): def __init__(self, query_data, response_data): self.query_data = query_data self.response_data = response_data def __len__(self): return len(self.query_data) def __getitem__(self, idx): return self.query_data[idx], self.response_data[idx] EXPECTED_STATS = [ "objective/kl", "objective/kl_dist", "objective/logprobs", "objective/ref_logprobs", "objective/kl_coef", "objective/entropy", "ppo/mean_non_score_reward", "ppo/loss/policy", "ppo/loss/value", "ppo/loss/total", "ppo/policy/entropy", "ppo/policy/approxkl", "ppo/policy/policykl", "ppo/policy/clipfrac", "ppo/policy/advantages", "ppo/policy/advantages_mean", "ppo/policy/ratio", "ppo/returns/mean", "ppo/returns/var", "ppo/val/vpred", "ppo/val/error", "ppo/val/clipfrac", "ppo/val/mean", "ppo/val/var", "ppo/val/var_explained", "time/ppo/forward_pass", "time/ppo/compute_rewards", "time/ppo/optimize_step", "time/ppo/calc_stats", "time/ppo/total", "ppo/learning_rate", ] @require_peft class TestPeftDependancy(unittest.TestCase): def setUp(self): self.causal_lm_model_id = "trl-internal-testing/tiny-random-GPTNeoXForCausalLM" self.seq_to_seq_model_id = "trl-internal-testing/tiny-random-T5ForConditionalGeneration" if is_peft_available(): from peft import LoraConfig, get_peft_model lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) self.peft_model = get_peft_model(causal_lm_model, lora_config) def test_no_peft(self): with patch.dict(sys.modules, {"peft": None}): from trl import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead # Check that loading a model with `peft` will raise an error with pytest.raises(ModuleNotFoundError): import peft # noqa: F401 _trl_model = AutoModelForCausalLMWithValueHead.from_pretrained(self.causal_lm_model_id) _trl_seq2seq_model = AutoModelForSeq2SeqLMWithValueHead.from_pretrained(self.seq_to_seq_model_id) def test_imports_no_peft(self): with patch.dict(sys.modules, {"peft": None}): from trl import ( # noqa: F401 AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead, PPOConfig, PPOTrainer, PreTrainedModelWrapper, ) def test_ppo_trainer_no_peft(self): with patch.dict(sys.modules, {"peft": None}): from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer ppo_model_id = "trl-internal-testing/dummy-GPT2-correct-vocab" trl_model = AutoModelForCausalLMWithValueHead.from_pretrained(ppo_model_id) tokenizer = AutoTokenizer.from_pretrained(ppo_model_id) tokenizer.pad_token_id = tokenizer.eos_token_id ppo_config = PPOConfig(batch_size=2, mini_batch_size=1, log_with=None) dummy_dataset = 
DummyDataset( [torch.LongTensor([0, 1, 0, 1, 0, 1]), torch.LongTensor([0, 1, 0, 1, 0, 1])], [torch.LongTensor([1, 0, 1, 0, 1, 0]), torch.LongTensor([0, 1, 0, 1, 0, 1])], ) ppo_trainer = PPOTrainer( config=ppo_config, model=trl_model, ref_model=None, tokenizer=tokenizer, dataset=dummy_dataset, ) ppo_trainer.optimizer.zero_grad = partial(ppo_trainer.optimizer.zero_grad, set_to_none=False) dummy_dataloader = ppo_trainer.dataloader for query_tensor, response_tensor in dummy_dataloader: # define a reward for response # (this could be any reward such as human feedback or output from another model) reward = [torch.tensor(1.0), torch.tensor(0.0)] # train model train_stats = ppo_trainer.step(list(query_tensor), list(response_tensor), reward) break # check gradients are not None for _, param in trl_model.named_parameters(): if param.requires_grad: assert param.grad is not None # check expected stats for stat in EXPECTED_STATS: assert stat in train_stats
trl/tests/test_no_peft.py/0
{ "file_path": "trl/tests/test_no_peft.py", "repo_id": "trl", "token_count": 2576 }
478
# This file is a copy of trl/examples/scripts/sft.py so that we could # use it together with rich and the TRL CLI in a more customizable manner. # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import subprocess import sys from subprocess import CalledProcessError from rich.console import Console SUPPORTED_COMMANDS = ["sft", "dpo", "chat"] def main(): console = Console() # Make sure to import things locally to avoid verbose from third party libs. with console.status("[bold purple]Welcome! Initializing the TRL CLI..."): from trl.commands.cli_utils import init_zero_verbose init_zero_verbose() command_name = sys.argv[1] if command_name not in SUPPORTED_COMMANDS: raise ValueError( f"Please use one of the supported commands, got {command_name} - supported commands are {SUPPORTED_COMMANDS}" ) trl_examples_dir = os.path.dirname(__file__) # Force-use rich if the `TRL_USE_RICH` env var is not set if "TRL_USE_RICH" not in os.environ: os.environ["TRL_USE_RICH"] = "1" if command_name == "chat": command = f""" python {trl_examples_dir}/scripts/{command_name}.py {" ".join(sys.argv[2:])} """ else: command = f""" accelerate launch {trl_examples_dir}/scripts/{command_name}.py {" ".join(sys.argv[2:])} """ try: subprocess.run( command.split(), text=True, check=True, encoding="utf-8", cwd=os.getcwd(), env=os.environ.copy(), ) except (CalledProcessError, ChildProcessError) as exc: console.log(f"TRL - {command_name.upper()} failed on ! See the logs above for further details.") raise ValueError("TRL CLI failed! Check the traceback above..") from exc if __name__ == "__main__": main()
trl/trl/commands/cli.py/0
{ "file_path": "trl/trl/commands/cli.py", "repo_id": "trl", "token_count": 929 }
479
import itertools from contextlib import contextmanager from dataclasses import dataclass from typing import TYPE_CHECKING, Literal, Optional, Tuple, Union from accelerate.utils import is_deepspeed_available from transformers import PreTrainedModel, PreTrainedTokenizer from .modeling_value_head import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead SUPPORTED_ARCHITECTURES = ( AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead, ) if is_deepspeed_available(): import deepspeed if TYPE_CHECKING: from accelerate import Accelerator from deepspeed.runtime.engine import DeepSpeedEngine from torch.nn.parallel.distributed import DistributedDataParallel from .modeling_base import PreTrainedModelWrapper # TODO: Add Abstract Base Class if more formats are added @dataclass class ChatMlSpecialTokens: """Dataclass for special tokens used in ChatML, including system, user, assistant, bos, eos, and pad tokens.""" bos_token: str = "<|im_start|>" eos_token: str = "<|im_end|>" pad_token: str = "<|im_end|>" @property def system(self): return f"{self.bos_token}system" @property def user(self): return f"{self.bos_token}user" @property def assistant(self): return f"{self.bos_token}assistant" @property def chat_template(self): return ( "{% for message in messages %}" f"{{{{'{self.bos_token}' + message['role'] + '\n' + message['content'] + '{self.eos_token}' + '\n'}}}}" "{% endfor %}" "{% if add_generation_prompt %}" f"{{{{ '{self.assistant}\n' }}}}" "{% endif %}" ) FORMAT_MAPPING = {"chatml": ChatMlSpecialTokens} def setup_chat_format( model: PreTrainedModel, tokenizer: PreTrainedTokenizer, format: Optional[Literal["chatml"]] = "chatml", resize_to_multiple_of: Optional[int] = None, ) -> Tuple[PreTrainedModel, PreTrainedTokenizer]: """ Setup chat format by adding special tokens to the tokenizer, setting the correct format, and extending the embedding layer of the model based on the new special tokens. Args: model (`~transformers.PreTrainedModel`): The model to be modified. tokenizer (`~transformers.PreTrainedTokenizer`): The tokenizer to be modified. format (`Optional[Literal["chatml"]]`): The format to be set. Defaults to "chatml". resize_to_multiple_of (`Optional[int]`): Number to resize the embedding layer to. Defaults to None. Returns: model (`~transformers.PreTrainedModel`): The modified model. tokenizer (`~transformers.PreTrainedTokenizer`): The modified tokenizer. """ # check if format available and retrieve if format not in FORMAT_MAPPING: raise ValueError(f"Format {format} not available. 
Please use one of {FORMAT_MAPPING.keys()}") chat_format = FORMAT_MAPPING[format]() # set special tokens and them tokenizer.eos_token = chat_format.eos_token tokenizer.pad_token = chat_format.pad_token tokenizer.bos_token = chat_format.bos_token tokenizer.add_special_tokens({"additional_special_tokens": [chat_format.bos_token, chat_format.eos_token]}) # set chat format for tokenizer tokenizer.chat_template = chat_format.chat_template # resize embedding layer to a multiple of 64, https://x.com/karpathy/status/1621578354024677377 model.resize_token_embeddings( len(tokenizer), pad_to_multiple_of=resize_to_multiple_of if resize_to_multiple_of is not None else None ) # Update the model config to use the new eos & bos tokens if getattr(model, "config", None) is not None: model.config.pad_token_id = tokenizer.pad_token_id model.config.bos_token_id = tokenizer.bos_token_id model.config.eos_token_id = tokenizer.eos_token_id # Update the generation config to use the new eos & bos token if getattr(model, "generation_config", None) is not None: model.generation_config.bos_token_id = tokenizer.bos_token_id model.generation_config.eos_token_id = tokenizer.eos_token_id model.generation_config.pad_token_id = tokenizer.pad_token_id return model, tokenizer def remove_hooks(model: "DeepSpeedEngine") -> None: """Removes the optimizer hooks from a DeepSpeed ZeRO-3 model.""" if model.optimizer is not None and hasattr(model.optimizer, "parameter_offload"): optimizer_offload = model.optimizer.parameter_offload elif model.optimizer is not None: optimizer_offload = model.optimizer for param in iter_params(optimizer_offload.module, recurse=True): param.ds_active_sub_modules.clear() for hook in optimizer_offload.forward_hooks: hook.remove() for hook in optimizer_offload.backward_hooks: hook.remove() optimizer_offload.forward_hooks = [] optimizer_offload.backward_hooks = [] def get_all_parameters(sub_module, recurse=False): return itertools.chain(sub_module.named_parameters(recurse=recurse), sub_module.ds_external_parameters()) def iter_params(module, recurse=False): return [param for _, param in get_all_parameters(module, recurse)] def add_hooks(model: "DeepSpeedEngine") -> None: """Adds the optimizer hooks from a DeepSpeed ZeRO-3 model.""" if model.optimizer is not None and hasattr(model.optimizer, "parameter_offload"): optimizer_offload = model.optimizer.parameter_offload elif model.optimizer is not None: optimizer_offload = model.optimizer optimizer_offload._register_hooks_recursively(optimizer_offload.module) @contextmanager def unwrap_model_for_generation( model: Union["DistributedDataParallel", "DeepSpeedEngine"], accelerator: "Accelerator", is_peft_model: bool = False ) -> Union["PreTrainedModelWrapper", "DeepSpeedEngine"]: """Context manager to unwrap a model for generation. For ZeRO-3 models, we gather the weights once to speed up generation. """ unwrapped_model = accelerator.unwrap_model(model) if is_peft_model: unwrapped_model.pretrained_model.disable_adapter() if accelerator.state.deepspeed_plugin is not None and accelerator.state.deepspeed_plugin.zero_stage == 3: with deepspeed.zero.GatheredParameters(model.parameters()): remove_hooks(model) yield accelerator.unwrap_model(model) add_hooks(model) else: yield unwrapped_model
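# --------------------------------------------------------------------------------------
# Illustrative usage sketch (not part of this module): a minimal example of
# `setup_chat_format`, assuming a base causal LM without a chat template.
# The model id below is only a placeholder.
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#     from trl import setup_chat_format
#
#     model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
#     tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
#
#     # Adds the ChatML special tokens, sets the chat template, and resizes the embeddings
#     model, tokenizer = setup_chat_format(model, tokenizer, format="chatml", resize_to_multiple_of=64)
# --------------------------------------------------------------------------------------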
trl/trl/models/utils.py/0
{ "file_path": "trl/trl/models/utils.py", "repo_id": "trl", "token_count": 2369 }
480
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Dict, Optional from transformers import TrainingArguments @dataclass class KTOConfig(TrainingArguments): r""" KTOConfig collects all training arguments related to the [`KTOTrainer`] class. Using [`HfArgumentParser`] we can turn this class into [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the command line. Parameters: max_length (`int`, *optional*, defaults to `None`): The maximum length of the sequences in the batch. This argument is required if you want to use the default data collator. max_prompt_length (`int`, *optional*, defaults to `None`): The maximum length of the prompt. This argument is required if you want to use the default data collator. max_completion_length (`int`, *optional*, defaults to `None`): The maximum length of the target. This argument is required if you want to use the default data collator and your model is an encoder-decoder. beta (`float`, defaults to 0.1): The beta factor in KTO loss. Higher beta means less divergence from the initial policy. desirable_weight (`float`, *optional*, defaults to 1.0): The desirable losses are weighed by this factor to counter unequal number of desirable and undesirable pairs. undesirable_weight (`float`, *optional*, defaults to 1.0): The undesirable losses are weighed by this factor to counter unequal number of desirable and undesirable pairs. label_pad_token_id (`int`, defaults to `-100`): The label pad token id. This argument is required if you want to use the default data collator. padding_value (`int`, defaults to `0`): The padding value if it is different to the tokenizer's pad_token_id. truncation_mode (`str`, defaults to `keep_end`): The truncation mode to use, either `keep_end` or `keep_start`. This argument is required if you want to use the default data collator. generate_during_eval (`bool`, defaults to `False`): Whether to sample and log generations during evaluation step. is_encoder_decoder (`Optional[bool]`, `optional`, defaults to `None`): If no model is provided, we need to know if the model_init returns an encoder-decoder. precompute_ref_log_probs (`bool`, defaults to `False`): Flag to precompute reference model log probabilities for training and evaluation datasets. This is useful if you want to train without the reference model and reduce the total GPU memory needed. model_init_kwargs: (`Optional[Dict]`, *optional*): Dict of Optional kwargs to pass when instantiating the model from a string. ref_model_init_kwargs: (`Optional[Dict]`, *optional*): Dict of Optional kwargs to pass when instantiating the ref model from a string. dataset_num_proc: (`Optional[int]`, *optional*, defaults to `None`): Number of processes to use for processing the datasets. """ max_length: Optional[int] = None """The maximum length of the sequences in the batch. 
This argument is required if you want to use the default data collator.""" max_prompt_length: Optional[int] = None """The maximum length of the prompt. This argument is required if you want to use the default data collator.""" max_completion_length: Optional[int] = None """The maximum length of the target. This argument is required if you want to use the default data collator and your model is an encoder-decoder.""" beta: float = 0.1 """The beta factor in KTO loss. Higher beta means less divergence from the initial policy.""" desirable_weight: Optional[float] = 1.0 """The desirable losses are weighed by this factor.""" undesirable_weight: Optional[float] = 1.0 """The undesirable losses are weighed by this factor.""" label_pad_token_id: int = -100 padding_value: int = None truncation_mode: str = "keep_end" generate_during_eval: bool = False is_encoder_decoder: Optional[bool] = None precompute_ref_log_probs: bool = False model_init_kwargs: Optional[Dict] = None ref_model_init_kwargs: Optional[Dict] = None dataset_num_proc: Optional[int] = None
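# --------------------------------------------------------------------------------------
# Illustrative usage sketch (not part of this module): a hypothetical configuration.
# The weight values assume a dataset with moderately more undesirable than desirable
# examples and are placeholders, not recommendations.
#
#     from trl import KTOConfig
#
#     training_args = KTOConfig(
#         output_dir="kto-aligned-model",
#         beta=0.1,                # higher beta keeps the policy closer to the initial model
#         desirable_weight=1.33,   # up-weight the rarer desirable examples
#         undesirable_weight=1.0,
#         max_length=1024,
#         max_prompt_length=512,
#     )
# --------------------------------------------------------------------------------------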
trl/trl/trainer/kto_config.py/0
{ "file_path": "trl/trl/trainer/kto_config.py", "repo_id": "trl", "token_count": 1587 }
481
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import dataclasses import inspect import warnings from functools import wraps from typing import Callable, Dict, List, Optional, Tuple, Union import datasets import torch import torch.nn as nn from accelerate.state import PartialState from datasets import Dataset from datasets.arrow_writer import SchemaInferenceError from datasets.builder import DatasetGenerationError from huggingface_hub.utils._deprecation import _deprecate_arguments from transformers import ( AutoModelForCausalLM, AutoTokenizer, DataCollator, DataCollatorForLanguageModeling, PreTrainedModel, PreTrainedTokenizerBase, Trainer, ) from transformers.modeling_utils import unwrap_model from transformers.trainer_callback import TrainerCallback from transformers.trainer_utils import EvalPrediction from ..extras.dataset_formatting import get_formatting_func_from_dataset from ..import_utils import is_liger_available, is_peft_available from .callbacks import RichProgressCallback from .sft_config import SFTConfig from .utils import ( ConstantLengthDataset, DataCollatorForCompletionOnlyLM, neftune_post_forward_hook, peft_module_casting_to_bf16, trl_sanitze_kwargs_for_tagging, ) if is_peft_available(): from peft import PeftConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training if is_liger_available(): from liger_kernel.transformers import AutoLigerKernelForCausalLM class SFTTrainer(Trainer): r""" Class definition of the Supervised Finetuning Trainer (SFT Trainer). This class is a wrapper around the `transformers.Trainer` class and inherits all of its attributes and methods. The trainer takes care of properly initializing the PeftModel in case a user passes a `PeftConfig` object. Args: model (Union[`transformers.PreTrainedModel`, `nn.Module`, `str`]): The model to train, can be a `PreTrainedModel`, a `torch.nn.Module` or a string with the model name to load from cache or download. The model can be also converted to a `PeftModel` if a `PeftConfig` object is passed to the `peft_config` argument. args (`Optional[SFTConfig]`): The arguments to tweak for training. Will default to a basic instance of [`SFTConfig`] with the `output_dir` set to a directory named *tmp_trainer* in the current directory if not provided. data_collator (`Optional[transformers.DataCollator]`): The data collator to use for training. train_dataset (`Optional[datasets.Dataset]`): The dataset to use for training. We recommend users to use `trl.trainer.ConstantLengthDataset` to create their dataset. eval_dataset (Optional[Union[`datasets.Dataset`, Dict[`str`, `datasets.Dataset`]]]): The dataset to use for evaluation. We recommend users to use `trl.trainer.ConstantLengthDataset` to create their dataset. tokenizer (`Optional[transformers.PreTrainedTokenizer]`): The tokenizer to use for training. If not specified, the tokenizer associated to the model will be used. model_init (`Callable[[], transformers.PreTrainedModel]`): The model initializer to use for training. 
If None is specified, the default model initializer will be used. compute_metrics (`Callable[[transformers.EvalPrediction], Dict]`, *optional* defaults to None): The function used to compute metrics during evaluation. It should return a dictionary mapping metric names to metric values. If not specified, only the loss will be computed during evaluation. callbacks (`List[transformers.TrainerCallback]`): The callbacks to use for training. optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`): The optimizer and scheduler to use for training. preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`): The function to use to preprocess the logits before computing the metrics. peft_config (`Optional[PeftConfig]`): The PeftConfig object to use to initialize the PeftModel. formatting_func (`Optional[Callable]`): The formatting function to be used for creating the `ConstantLengthDataset`. """ _tag_names = ["trl", "sft"] @_deprecate_arguments( version="1.0.0", deprecated_args=[ "dataset_text_field", "packing", "max_seq_length", "dataset_num_proc", "dataset_batch_size", "neftune_noise_alpha", "model_init_kwargs", "dataset_kwargs", "eval_packing", "num_of_sequences", "chars_per_token", ], custom_message="Deprecated positional argument(s) used in SFTTrainer, please use the SFTConfig to set these arguments instead.", ) def __init__( self, model: Optional[Union[PreTrainedModel, nn.Module, str]] = None, args: Optional[SFTConfig] = None, data_collator: Optional[DataCollator] = None, # type: ignore train_dataset: Optional[Dataset] = None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None, tokenizer: Optional[PreTrainedTokenizerBase] = None, model_init: Optional[Callable[[], PreTrainedModel]] = None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, callbacks: Optional[List[TrainerCallback]] = None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, peft_config: Optional["PeftConfig"] = None, dataset_text_field: Optional[str] = None, packing: Optional[bool] = False, formatting_func: Optional[Callable] = None, max_seq_length: Optional[int] = None, infinite: Optional[bool] = None, num_of_sequences: Optional[int] = None, chars_per_token: Optional[float] = None, dataset_num_proc: Optional[int] = None, dataset_batch_size: Optional[int] = None, neftune_noise_alpha: Optional[float] = None, model_init_kwargs: Optional[Dict] = None, dataset_kwargs: Optional[Dict] = None, eval_packing: Optional[bool] = None, ): if args is None: output_dir = "tmp_trainer" warnings.warn(f"No `SFTConfig` passed, using `output_dir={output_dir}`.") args = SFTConfig(output_dir=output_dir) elif args is not None and args.__class__.__name__ == "TrainingArguments": args_as_dict = args.to_dict() # Manually copy token values as TrainingArguments.to_dict() redacts them args_as_dict.update({k: getattr(args, k) for k in args_as_dict.keys() if k.endswith("_token")}) args = SFTConfig(**args_as_dict) if model_init_kwargs is not None: warnings.warn( "You passed `model_init_kwargs` to the SFTTrainer, the value you passed will override the one in the `SFTConfig`." 
) args.model_init_kwargs = model_init_kwargs if getattr(args, "model_init_kwargs", None) is None: model_init_kwargs = {} elif not isinstance(model, str): raise ValueError("You passed model_init_kwargs to the SFTConfig, but your model is already instantiated.") else: model_init_kwargs = args.model_init_kwargs torch_dtype = model_init_kwargs.get("torch_dtype") if torch_dtype is not None: # Convert to `torch.dtype` if an str is passed if isinstance(torch_dtype, str) and torch_dtype != "auto": torch_dtype = getattr(torch, torch_dtype) if torch_dtype != "auto" and not isinstance(torch_dtype, torch.dtype): raise ValueError( f"Invalid `torch_dtype` passed to the SFTConfig. Expected a string with either `torch.dtype` or 'auto', but got {torch_dtype}." ) model_init_kwargs["torch_dtype"] = torch_dtype if infinite is not None: warnings.warn( "The `infinite` argument is deprecated and will be removed in a future version of TRL. Use `TrainingArguments.max_steps` or `TrainingArguments.num_train_epochs` instead to control training length." ) if isinstance(model, str): warnings.warn( "You passed a model_id to the SFTTrainer. This will automatically create an " "`AutoModelForCausalLM` or a `PeftModel` (if you passed a `peft_config`) for you." ) if args.use_liger: model = AutoLigerKernelForCausalLM.from_pretrained(model, **model_init_kwargs) else: model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs) if packing: warnings.warn( "You passed a `packing` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`." ) args.packing = packing if eval_packing is not None: warnings.warn( "You passed a `eval_packing` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`." ) args.eval_packing = eval_packing if args.packing and data_collator is not None and isinstance(data_collator, DataCollatorForCompletionOnlyLM): raise ValueError( "You passed a `DataCollatorForCompletionOnlyLM` to the SFTTrainer. This is not compatible with the `packing` argument." ) if is_peft_available() and peft_config is not None: if not isinstance(peft_config, PeftConfig): raise ValueError( "If you want to use the PeftModel, you need to pass a PeftConfig object to the SFTTrainer." f" and you passed a {type(peft_config)}." 
) if not isinstance(model, PeftModel): _support_gc_kwargs = hasattr( args, "gradient_checkpointing_kwargs" ) and "gradient_checkpointing_kwargs" in list( inspect.signature(prepare_model_for_kbit_training).parameters ) gradient_checkpointing_kwargs = getattr(args, "gradient_checkpointing_kwargs", None) or {} is_sharded_qlora = False # Below is to support QLoRA + FSDP / DS-Zero3 - one should never call # peft_module_casting_to_bf16 or prepare_model_for_kbit_training when doing # QLoRA + FSDP / DS-Zero3 if getattr(model, "is_loaded_in_4bit", False): for _, param in model.named_parameters(): if param.__class__.__name__ == "Params4bit": is_sharded_qlora = param.data.device.type == "cpu" break if getattr(model, "is_loaded_in_8bit", False) or ( getattr(model, "is_loaded_in_4bit", False) and not is_sharded_qlora ): prepare_model_kwargs = { "use_gradient_checkpointing": getattr(args, "gradient_checkpointing", False) } if _support_gc_kwargs: prepare_model_kwargs["gradient_checkpointing_kwargs"] = gradient_checkpointing_kwargs model = prepare_model_for_kbit_training(model, **prepare_model_kwargs) if args is not None: args = dataclasses.replace(args, gradient_checkpointing=False) elif getattr(args, "gradient_checkpointing", False) and ( "use_reentrant" not in gradient_checkpointing_kwargs or gradient_checkpointing_kwargs["use_reentrant"] ): # For backward compatibility with older versions of transformers if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) if ( "autocast_adapter_dtype" in list(inspect.signature(get_peft_model).parameters) and getattr(model, "is_loaded_in_4bit", False) and is_sharded_qlora ): model = get_peft_model(model, peft_config, autocast_adapter_dtype=False) else: model = get_peft_model(model, peft_config) if ( args is not None and args.bf16 and getattr(model, "is_loaded_in_4bit", False) and not is_sharded_qlora ): peft_module_casting_to_bf16(model) if tokenizer is None: tokenizer = AutoTokenizer.from_pretrained(model.config._name_or_path) if getattr(tokenizer, "pad_token", None) is None: tokenizer.pad_token = tokenizer.eos_token if max_seq_length is not None: warnings.warn( "You passed a `max_seq_length` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`." ) args.max_seq_length = max_seq_length if args.max_seq_length is None: # to overcome some issues with broken tokenizers args.max_seq_length = min(tokenizer.model_max_length, 1024) warnings.warn( f"You didn't pass a `max_seq_length` argument to the SFTTrainer, this will default to {args.max_seq_length}" ) if dataset_num_proc is not None: warnings.warn( "You passed a `dataset_num_proc` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`." ) args.dataset_num_proc = dataset_num_proc self.dataset_num_proc = args.dataset_num_proc if dataset_batch_size is not None: warnings.warn( "You passed a `dataset_batch_size` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`." 
) args.dataset_batch_size = dataset_batch_size self.dataset_batch_size = args.dataset_batch_size self._trainer_supports_neftune = hasattr(args, "neftune_noise_alpha") if neftune_noise_alpha is not None and self._trainer_supports_neftune: args.neftune_noise_alpha = neftune_noise_alpha warnings.warn( "You passed a `neftune_noise_alpha` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`." ) # self.neftune_noise_alpha is done at Trainer level elif not self._trainer_supports_neftune: self.neftune_noise_alpha = neftune_noise_alpha if dataset_text_field is not None: warnings.warn( "You passed a `dataset_text_field` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`." ) args.dataset_text_field = dataset_text_field if dataset_kwargs is not None: warnings.warn( "You passed a `dataset_kwargs` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`." ) args.dataset_kwargs = dataset_kwargs if args.dataset_kwargs is None: args.dataset_kwargs = {} if formatting_func is None and args.dataset_text_field is None: # check if dataset has ChatML format or instruction format and is supported # if not stays #None formatting_func = get_formatting_func_from_dataset(train_dataset, tokenizer) # if a template is detected, we don't need to add special tokens again if formatting_func is not None: args.dataset_kwargs["add_special_tokens"] = False if not args.packing: # If we aren't skipping data preparation, then a dataset_text_field # or formatting_func must be provided. if ( args.dataset_text_field is None and formatting_func is None and not args.dataset_kwargs.get("skip_prepare_dataset", False) ): raise ValueError( "You passed `packing=False` to the SFTTrainer/SFTConfig, but you didn't pass a `dataset_text_field` or `formatting_func` argument." ) if data_collator is None: data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False) if num_of_sequences is not None: warnings.warn( "You passed a `num_of_sequences` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`." ) args.num_of_sequences = num_of_sequences if chars_per_token is not None: warnings.warn( "You passed a `chars_per_token` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`." ) args.chars_per_token = chars_per_token # Pre-process the datasets only once per node. The remaining processes will use the cache. 
with PartialState().local_main_process_first(): if train_dataset is not None: train_dataset = self._prepare_dataset( train_dataset, tokenizer, args.packing, args.dataset_text_field, args.max_seq_length, formatting_func, args.num_of_sequences, args.chars_per_token, remove_unused_columns=args.remove_unused_columns if args is not None else True, **args.dataset_kwargs, ) if eval_dataset is not None: _multiple = isinstance(eval_dataset, dict) _eval_datasets = eval_dataset if _multiple else {"singleton": eval_dataset} eval_packing = args.packing if args.eval_packing is None else args.eval_packing for _eval_dataset_name, _eval_dataset in _eval_datasets.items(): _eval_datasets[_eval_dataset_name] = self._prepare_dataset( _eval_dataset, tokenizer, eval_packing, args.dataset_text_field, args.max_seq_length, formatting_func, args.num_of_sequences, args.chars_per_token, remove_unused_columns=args.remove_unused_columns if args is not None else True, **args.dataset_kwargs, ) if not _multiple: eval_dataset = _eval_datasets["singleton"] if tokenizer.padding_side is not None and tokenizer.padding_side != "right": warnings.warn( "You passed a tokenizer with `padding_side` not equal to `right` to the SFTTrainer. This might lead to some unexpected behaviour due to " "overflow issues when training a model in half-precision. You might consider adding `tokenizer.padding_side = 'right'` to your code." ) super().__init__( model=model, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, model_init=model_init, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics, ) # Add tags for models that have been loaded with the correct transformers version if hasattr(self.model, "add_model_tags"): self.model.add_model_tags(self._tag_names) if self.args.max_steps > 0 and args.packing: warnings.warn( "You passed `packing=True` to the SFTTrainer/SFTConfig, and you are training your model with `max_steps` strategy. The dataset will be iterated until the `max_steps` are reached." ) self.train_dataset.infinite = True elif self.args.max_steps == -1 and args.packing: self.train_dataset.infinite = False if any(isinstance(callback, RichProgressCallback) for callback in self.callback_handler.callbacks): for callback in self.callback_handler.callbacks: # Remove the PrinterCallback to avoid duplicated prints in case we passed a `RichProgressCallback` if callback.__class__.__name__ == "PrinterCallback": self.callback_handler.pop_callback(callback) @wraps(Trainer.train) def train(self, *args, **kwargs): # Activate neftune right before training. if self.neftune_noise_alpha is not None and not self._trainer_supports_neftune: self.model = self._trl_activate_neftune(self.model) output = super().train(*args, **kwargs) # After training we make sure to retrieve back the original forward pass method # for the embedding layer by removing the forward post hook. 
if self.neftune_noise_alpha is not None and not self._trainer_supports_neftune: unwrapped_model = unwrap_model(self.model) if is_peft_available() and isinstance(unwrapped_model, PeftModel): embeddings = unwrapped_model.base_model.model.get_input_embeddings() else: embeddings = unwrapped_model.get_input_embeddings() self.neftune_hook_handle.remove() del embeddings.neftune_noise_alpha return output @wraps(Trainer.push_to_hub) def push_to_hub( self, commit_message: Optional[str] = "End of training", blocking: bool = True, **kwargs, ) -> str: """ Overwrite the `push_to_hub` method in order to force-add the tag "sft" when pushing the model on the Hub. Please refer to `~transformers.Trainer.push_to_hub` for more details. Unlike the parent class, we don't use the `token` argument to mitigate security risks. """ kwargs = trl_sanitze_kwargs_for_tagging(model=self.model, tag_names=self._tag_names, kwargs=kwargs) return super().push_to_hub(commit_message=commit_message, blocking=blocking, **kwargs) def _prepare_dataset( self, dataset, tokenizer, packing, dataset_text_field, max_seq_length, formatting_func, num_of_sequences, chars_per_token, remove_unused_columns=True, append_concat_token=True, add_special_tokens=True, skip_prepare_dataset=False, ): if dataset is None: raise ValueError("The dataset should not be None") if skip_prepare_dataset: return dataset # If the dataset is already preprocessed (tokenized), return as-is. Only works if dataset is # a datasets.Dataset or datasets.IterableDataset -- not for torch Dataset column_names = ( dataset.column_names if isinstance(dataset, (datasets.Dataset, datasets.IterableDataset)) else None ) if column_names and "input_ids" in column_names: if formatting_func is not None: warnings.warn( "You passed a dataset that is already processed (contains an `input_ids` field) together with a valid formatting function. Therefore `formatting_func` will be ignored." ) return dataset # check if torch dataset / dataloader and do nothing # see https://github.com/huggingface/trl/pull/1468 for why datasets.IterableDataset needs a separate check if isinstance( dataset, (torch.utils.data.IterableDataset, torch.utils.data.Dataset, ConstantLengthDataset) ) and not isinstance(dataset, datasets.IterableDataset): return dataset if not packing: return self._prepare_non_packed_dataloader( tokenizer, dataset, dataset_text_field, max_seq_length, formatting_func, add_special_tokens, remove_unused_columns, ) else: return self._prepare_packed_dataloader( tokenizer, dataset, dataset_text_field, max_seq_length, num_of_sequences, chars_per_token, formatting_func, append_concat_token, add_special_tokens, ) def _prepare_non_packed_dataloader( self, tokenizer, dataset, dataset_text_field, max_seq_length, formatting_func=None, add_special_tokens=True, remove_unused_columns=True, ): use_formatting_func = formatting_func is not None and dataset_text_field is None self._dataset_sanity_checked = False # Inspired from: https://huggingface.co/learn/nlp-course/chapter7/6?fw=pt def tokenize(element): outputs = tokenizer( element[dataset_text_field] if not use_formatting_func else formatting_func(element), add_special_tokens=add_special_tokens, truncation=True, padding=False, max_length=max_seq_length, return_overflowing_tokens=False, return_length=False, ) if use_formatting_func and not self._dataset_sanity_checked: if not isinstance(formatting_func(element), list): raise ValueError( "The `formatting_func` should return a list of processed strings since it can lead to silent bugs." 
) else: self._dataset_sanity_checked = True return {"input_ids": outputs["input_ids"], "attention_mask": outputs["attention_mask"]} signature_columns = ["input_ids", "labels", "attention_mask"] if dataset.column_names is not None: # None for IterableDataset extra_columns = list(set(dataset.column_names) - set(signature_columns)) else: extra_columns = [] if not remove_unused_columns and len(extra_columns) > 0: warnings.warn( "You passed `remove_unused_columns=False` on a non-packed dataset. This might create some issues with the default collator and yield to errors. If you want to " f"inspect dataset other columns (in this case {extra_columns}), you can subclass `DataCollatorForLanguageModeling` in case you used the default collator and create your own data collator in order to inspect the unused dataset columns." ) map_kwargs = { "batched": True, "remove_columns": dataset.column_names if remove_unused_columns else None, "batch_size": self.dataset_batch_size, } if isinstance(dataset, datasets.Dataset): map_kwargs["num_proc"] = self.dataset_num_proc # this arg is not available for IterableDataset tokenized_dataset = dataset.map(tokenize, **map_kwargs) return tokenized_dataset def _prepare_packed_dataloader( self, tokenizer, dataset, dataset_text_field, max_seq_length, num_of_sequences, chars_per_token, formatting_func=None, append_concat_token=True, add_special_tokens=True, ): if dataset_text_field is not None or formatting_func is not None: if tokenizer is None: raise ValueError("You need to pass a tokenizer when using `dataset_text_field` with `SFTTrainer`.") constant_length_iterator = ConstantLengthDataset( tokenizer, dataset, dataset_text_field=dataset_text_field, formatting_func=formatting_func, seq_length=max_seq_length, infinite=False, num_of_sequences=num_of_sequences, chars_per_token=chars_per_token, eos_token_id=tokenizer.eos_token_id, append_concat_token=append_concat_token, add_special_tokens=add_special_tokens, ) if isinstance(dataset, datasets.IterableDataset): return constant_length_iterator def data_generator(constant_length_iterator): yield from constant_length_iterator try: packed_dataset = Dataset.from_generator( data_generator, gen_kwargs={"constant_length_iterator": constant_length_iterator} ) except (DatasetGenerationError, SchemaInferenceError) as exc: raise ValueError( "Error occurred while packing the dataset. " "Make sure that your dataset has enough samples to at least yield one packed sequence." ) from exc return packed_dataset else: raise ValueError( "You need to pass a `dataset_text_field` or `formatting_func` argument to the SFTTrainer if you want to use the `ConstantLengthDataset`." ) def _trl_activate_neftune(self, model): r""" Activates the neftune as presented in this code: https://github.com/neelsjain/NEFTune and paper: https://huggingface.co/papers/2310.05914 Since in transformers Trainer we do have an `_activate_neftune` method, we need to rename this method to avoid conflicts. """ unwrapped_model = unwrap_model(model) if is_peft_available() and isinstance(unwrapped_model, PeftModel): embeddings = unwrapped_model.base_model.model.get_input_embeddings() else: embeddings = unwrapped_model.get_input_embeddings() embeddings.neftune_noise_alpha = self.neftune_noise_alpha hook_handle = embeddings.register_forward_hook(neftune_post_forward_hook) self.neftune_hook_handle = hook_handle return model
trl/trl/trainer/sft_trainer.py/0
{ "file_path": "trl/trl/trainer/sft_trainer.py", "repo_id": "trl", "token_count": 14380 }
482
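The trainer code above routes every dataset through `_prepare_dataset`, choosing between a tokenized (non-packed) path and a `ConstantLengthDataset` packing path. Below is a minimal, hedged usage sketch of how those paths are typically reached from user code; the checkpoint and dataset names are placeholders, not part of the file above.

```python
# Hedged usage sketch (not part of sft_trainer.py): exercising the packed and
# non-packed preparation paths shown above. Names are placeholders.
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

train_dataset = load_dataset("imdb", split="train[:1%]")  # any dataset with a "text" column

config = SFTConfig(
    output_dir="./sft-out",
    dataset_text_field="text",  # consumed by _prepare_non_packed_dataloader
    max_seq_length=512,
    packing=False,              # True would route through _prepare_packed_dataloader
)

trainer = SFTTrainer(
    model="facebook/opt-350m",  # placeholder checkpoint
    args=config,
    train_dataset=train_dataset,
)
trainer.train()
```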
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script tests to ensure that `accelerate` performs at the same level as raw `TransformersEngine`. This particular script verifies this for DDP training. """ from unittest.mock import patch import deepspeed import evaluate import torch import transformer_engine.common.recipe as te_recipe import transformer_engine.pytorch as te from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities from transformer_engine.common.recipe import DelayedScaling from accelerate import Accelerator, DeepSpeedPlugin from accelerate.state import AcceleratorState from accelerate.utils import FP8RecipeKwargs, set_seed from accelerate.utils.transformer_engine import convert_model MODEL_NAME = "bert-base-cased" METRIC = evaluate.load("glue", "mrpc") def train_baseline(zero_stage: int = 1): # This forces transformers to think Zero-3 Init should be used with patch("transformers.integrations.deepspeed.is_deepspeed_zero3_enabled") as mock: mock.return_value = zero_stage == 3 set_seed(42) accelerator = Accelerator() model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) # Convert the model to TE old_named_params = get_named_parameters(model) with torch.no_grad(): convert_model(model) new_named_params = get_named_parameters(model) mapping = {p: new_named_params[n] for n, p in old_named_params.items()} for param_group in optimizer.param_groups: param_group["params"] = [mapping[p] for p in param_group["params"]] FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"} fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS) import numpy as np config = { "train_batch_size": 32, "train_micro_batch_size_per_gpu": 16, "gradient_accumulation_steps": 1, "zero_optimization": { "stage": zero_stage, "offload_optimizer": {"device": "none", "nvme_path": None}, "offload_param": {"device": "none", "nvme_path": None}, "stage3_gather_16bit_weights_on_model_save": False, }, "gradient_clipping": 1.0, "steps_per_print": np.inf, "bf16": {"enabled": True}, "fp16": {"enabled": False}, "zero_allow_untested_optimizer": True, } ( model, optimizer, _, _, ) = deepspeed.initialize( model=model, optimizer=optimizer, config_params=config, ) base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() model_outputs = [] data = [] for _ in range(2): for batch in train_dataloader: with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe): outputs = model(**batch) data.append(batch.to("cpu")) model_outputs.append(outputs.logits.to("cpu")) loss = outputs.loss model.backward(loss) model.step() for _ in range(accelerator.num_processes): lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.destroy() assert ( trained_model_results["accuracy"] > base_model_results["accuracy"] ), f'Accuracy should be 
higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}' assert ( trained_model_results["f1"] > base_model_results["f1"] ), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}' return base_model_results, trained_model_results, model_outputs, data def train_integration(zero_stage: int = 1): set_seed(42) FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"} kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)] AcceleratorState()._reset_state(True) deepspeed_plugin = DeepSpeedPlugin( zero_stage=zero_stage, zero3_init_flag=zero_stage == 3, ) accelerator = Accelerator( mixed_precision="fp8", kwargs_handlers=kwargs_handlers, deepspeed_plugin=deepspeed_plugin ) accelerator.state.deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] = 16 model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler) base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() model_outputs = [] data = [] for _ in range(2): for batch in train_dataloader: outputs = model(**batch) data.append(batch.to("cpu")) model_outputs.append(outputs.logits.to("cpu")) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.destroy() assert ( trained_model_results["accuracy"] > base_model_results["accuracy"] ), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}' assert ( trained_model_results["f1"] > base_model_results["f1"] ), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}' return base_model_results, trained_model_results, model_outputs, data if __name__ == "__main__": # for zero_stage in [1, 2, 3]: zero_stage = 1 baseline_not_trained, baseline_trained, baseline_outputs, baseline_data = train_baseline(zero_stage) accelerator_not_trained, accelerator_trained, accelerator_outputs, accelerator_data = train_integration(zero_stage) assert ( baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"] ), f'ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}' assert ( baseline_not_trained["f1"] == accelerator_not_trained["f1"] ), f'ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}' assert ( baseline_trained["accuracy"] == accelerator_trained["accuracy"] ), f'ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}' assert ( baseline_trained["f1"] == accelerator_trained["f1"] ), f'ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}' torch.distributed.destroy_process_group()
accelerate/benchmarks/fp8/distrib_deepspeed.py/0
{ "file_path": "accelerate/benchmarks/fp8/distrib_deepspeed.py", "repo_id": "accelerate", "token_count": 2963 }
0
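One detail worth calling out in `train_baseline` above is the optimizer fix-up after `convert_model`: converting layers in place creates new parameter objects, so each param group has to be re-pointed at them. A small, generic PyTorch sketch of that pattern follows; the toy `Linear` stands in for the converted TE model and is not part of the benchmark script.

```python
# Generic sketch of re-binding optimizer param groups after an in-place module
# conversion, as done in train_baseline above.
import torch

model = torch.nn.Linear(8, 8)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)

# Snapshot name -> parameter before the conversion
old_named_params = dict(model.named_parameters())

# ... here the conversion would replace submodules, creating new parameters ...

# Collect the (possibly new) parameters under the same names afterwards
new_named_params = dict(model.named_parameters())

# Map old parameter objects to the new ones and rewrite every param group
mapping = {p: new_named_params[n] for n, p in old_named_params.items()}
for param_group in optimizer.param_groups:
    param_group["params"] = [mapping[p] for p in param_group["params"]]
```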
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Overview Welcome to the 🤗 Accelerate tutorials! These introductory guides will help catch you up to speed on working with 🤗 Accelerate. You'll learn how to modify your code to have it work with the API seamlessly, how to launch your script properly, and more! These tutorials assume some basic knowledge of Python and familiarity with the PyTorch framework. If you have any questions about 🤗 Accelerate, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/accelerate/18).
accelerate/docs/source/basic_tutorials/overview.md/0
{ "file_path": "accelerate/docs/source/basic_tutorials/overview.md", "repo_id": "accelerate", "token_count": 312 }
1
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Checkpointing When training a PyTorch model with 🤗 Accelerate, you may often want to save and continue a state of training. Doing so requires saving and loading the model, optimizer, RNG generators, and the GradScaler. Inside 🤗 Accelerate are two convenience functions to achieve this quickly: - Use [`~Accelerator.save_state`] for saving everything mentioned above to a folder location - Use [`~Accelerator.load_state`] for loading everything stored from an earlier `save_state` To further customize where and how states are saved through [`~Accelerator.save_state`] the [`~utils.ProjectConfiguration`] class can be used. For example if `automatic_checkpoint_naming` is enabled each saved checkpoint will be located then at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`. It should be noted that the expectation is that those states come from the same training script, they should not be from two separate scripts. - By using [`~Accelerator.register_for_checkpointing`], you can register custom objects to be automatically stored or loaded from the two prior functions, so long as the object has a `state_dict` **and** a `load_state_dict` functionality. This could include objects such as a learning rate scheduler. Below is a brief example using checkpointing to save and reload a state during training: ```python from accelerate import Accelerator import torch accelerator = Accelerator(project_dir="my/save/path") my_scheduler = torch.optim.lr_scheduler.StepLR(my_optimizer, step_size=1, gamma=0.99) my_model, my_optimizer, my_training_dataloader = accelerator.prepare(my_model, my_optimizer, my_training_dataloader) # Register the LR scheduler accelerator.register_for_checkpointing(my_scheduler) # Save the starting state accelerator.save_state() device = accelerator.device my_model.to(device) # Perform training for epoch in range(num_epochs): for batch in my_training_dataloader: my_optimizer.zero_grad() inputs, targets = batch inputs = inputs.to(device) targets = targets.to(device) outputs = my_model(inputs) loss = my_loss_function(outputs, targets) accelerator.backward(loss) my_optimizer.step() my_scheduler.step() # Restore the previous state accelerator.load_state("my/save/path/checkpointing/checkpoint_0") ``` ## Restoring the state of the DataLoader After resuming from a checkpoint, it may also be desirable to resume from a particular point in the active `DataLoader` if the state was saved during the middle of an epoch. You can use [`~Accelerator.skip_first_batches`] to do so. 
```python from accelerate import Accelerator accelerator = Accelerator(project_dir="my/save/path") train_dataloader = accelerator.prepare(train_dataloader) accelerator.load_state("my_state") # Assume the checkpoint was saved 100 steps into the epoch skipped_dataloader = accelerator.skip_first_batches(train_dataloader, 100) # After the first iteration, go back to `train_dataloader` # First epoch for batch in skipped_dataloader: # Do something pass # Second epoch for batch in train_dataloader: # Do something pass ```
accelerate/docs/source/usage_guides/checkpoint.md/0
{ "file_path": "accelerate/docs/source/usage_guides/checkpoint.md", "repo_id": "accelerate", "token_count": 1156 }
2
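The checkpointing guide above mentions `automatic_checkpoint_naming` on [`~utils.ProjectConfiguration`] without showing it; a short hedged sketch of that configuration (paths are placeholders) is:

```python
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

# Checkpoints are written to my/save/path/checkpoints/checkpoint_{n}
project_config = ProjectConfiguration(project_dir="my/save/path", automatic_checkpoint_naming=True)
accelerator = Accelerator(project_config=project_config)

accelerator.save_state()  # -> .../checkpoints/checkpoint_0
accelerator.save_state()  # -> .../checkpoints/checkpoint_1
```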
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# Tracking

There are a large number of experiment tracking APIs available, but getting them all to work in a multi-processing environment can oftentimes be complex.
🤗 Accelerate provides a general tracking API that can be used to log useful items during your script through [`Accelerator.log`].

## Integrated Trackers

Currently `Accelerate` supports seven trackers out-of-the-box:

- TensorBoard
- WandB
- CometML
- Aim
- MLFlow
- ClearML
- DVCLive

To use any of them, pass in the selected type(s) to the `log_with` parameter in [`Accelerator`]:

```python
from accelerate import Accelerator
from accelerate.utils import LoggerType

accelerator = Accelerator(log_with="all")  # For all available trackers in the environment
accelerator = Accelerator(log_with="wandb")
accelerator = Accelerator(log_with=["wandb", LoggerType.TENSORBOARD])
```

At the start of your experiment [`Accelerator.init_trackers`] should be used to set up your project, and potentially add any experiment hyperparameters to be logged:

```python
hps = {"num_iterations": 5, "learning_rate": 1e-2}
accelerator.init_trackers("my_project", config=hps)
```

When you are ready to log any data, [`Accelerator.log`] should be used. A `step` can also be passed in to correlate the data with a particular step in the training loop.

```python
accelerator.log({"train_loss": 1.12, "valid_loss": 0.8}, step=1)
```

Once you've finished training, make sure to run [`Accelerator.end_training`] so that all the trackers can run their finish functionalities if they have any.

```python
accelerator.end_training()
```

A full example is below:

```python
from accelerate import Accelerator

accelerator = Accelerator(log_with="all")
config = {
    "num_iterations": 5,
    "learning_rate": 1e-2,
    "loss_function": str(my_loss_function),
}

accelerator.init_trackers("example_project", config=config)

my_model, my_optimizer, my_training_dataloader = accelerator.prepare(my_model, my_optimizer, my_training_dataloader)
device = accelerator.device
my_model.to(device)

for iteration in range(config["num_iterations"]):
    for step, batch in enumerate(my_training_dataloader):
        my_optimizer.zero_grad()
        inputs, targets = batch
        inputs = inputs.to(device)
        targets = targets.to(device)
        outputs = my_model(inputs)
        loss = my_loss_function(outputs, targets)
        accelerator.backward(loss)
        my_optimizer.step()
        accelerator.log({"training_loss": loss}, step=step)
accelerator.end_training()
```

If a tracker requires a directory to save data to, such as `TensorBoard`, then pass the directory path to `project_dir`. The `project_dir` parameter is useful when there are other configurations to be combined with it in the [`~utils.ProjectConfiguration`] data class.
For example, you can save the TensorBoard data to `project_dir` and everything else can be logged in the `logging_dir` parameter of [`~utils.ProjectConfiguration`: ```python accelerator = Accelerator(log_with="tensorboard", project_dir=".") # use with ProjectConfiguration config = ProjectConfiguration(project_dir=".", logging_dir="another/directory") accelerator = Accelerator(log_with="tensorboard", project_config=config) ``` ## Implementing Custom Trackers To implement a new tracker to be used in `Accelerator`, a new one can be made through implementing the [`GeneralTracker`] class. Every tracker must implement three functions and have three properties: - `__init__`: - Should store a `run_name` and initialize the tracker API of the integrated library. - If a tracker stores their data locally (such as TensorBoard), a `logging_dir` parameter can be added. - `store_init_configuration`: - Should take in a `values` dictionary and store them as a one-time experiment configuration - `log`: - Should take in a `values` dictionary and a `step`, and should log them to the run - `name` (`str`): - A unique string name for the tracker, such as `"wandb"` for the wandb tracker. - This will be used for interacting with this tracker specifically - `requires_logging_directory` (`bool`): - Whether a `logging_dir` is needed for this particular tracker and if it uses one. - `tracker`: - This should be implemented as a `@property` function - Should return the internal tracking mechanism the library uses, such as the `run` object for `wandb`. Each method should also utilize the [`state.PartialState`] class if the logger should only be executed on the main process for instance. A brief example can be seen below with an integration with Weights and Biases, containing only the relevant information and logging just on the main process: ```python from accelerate.tracking import GeneralTracker, on_main_process from typing import Optional import wandb class MyCustomTracker(GeneralTracker): name = "wandb" requires_logging_directory = False @on_main_process def __init__(self, run_name: str): self.run_name = run_name run = wandb.init(self.run_name) @property def tracker(self): return self.run.run @on_main_process def store_init_configuration(self, values: dict): wandb.config(values) @on_main_process def log(self, values: dict, step: Optional[int] = None): wandb.log(values, step=step) ``` When you are ready to build your `Accelerator` object, pass in an **instance** of your tracker to [`Accelerator.log_with`] to have it automatically be used with the API: ```python tracker = MyCustomTracker("some_run_name") accelerator = Accelerator(log_with=tracker) ``` These also can be mixed with existing trackers, including with `"all"`: ```python tracker = MyCustomTracker("some_run_name") accelerator = Accelerator(log_with=[tracker, "all"]) ``` ## Accessing the internal tracker If some custom interactions with a tracker might be wanted directly, you can quickly access one using the [`Accelerator.get_tracker`] method. Just pass in the string corresponding to a tracker's `.name` attribute and it will return that tracker on the main process. 
This example shows doing so with wandb:

```python
wandb_tracker = accelerator.get_tracker("wandb")
```

From there you can interact with `wandb`'s `run` object like normal:

```python
wandb_tracker.log_artifact(some_artifact_to_log)
```

<Tip>
Trackers built in Accelerate will automatically execute on the correct process, so if a tracker is only meant to be run on the main process it will do so automatically.
</Tip>

If you want to truly remove Accelerate's wrapping entirely, you can achieve the same outcome with:

```python
wandb_tracker = accelerator.get_tracker("wandb", unwrap=True)
if accelerator.is_main_process:
    wandb_tracker.log_artifact(some_artifact_to_log)
```

## When a wrapper cannot work

If a library has an API that does not follow a strict `.log` with an overall dictionary such as Neptune.AI, logging can be done manually under an `if accelerator.is_main_process` statement:

```diff
  from accelerate import Accelerator
+ import neptune.new as neptune

  accelerator = Accelerator()
+ run = neptune.init(...)

  my_model, my_optimizer, my_training_dataloader = accelerator.prepare(my_model, my_optimizer, my_training_dataloader)
  device = accelerator.device
  my_model.to(device)

  for iteration in range(config["num_iterations"]):
      for batch in my_training_dataloader:
          my_optimizer.zero_grad()
          inputs, targets = batch
          inputs = inputs.to(device)
          targets = targets.to(device)
          outputs = my_model(inputs)
          loss = my_loss_function(outputs, targets)
          total_loss += loss
          accelerator.backward(loss)
          my_optimizer.step()
+         if accelerator.is_main_process:
+             run["logs/training/batch/loss"].log(loss)
```
accelerate/docs/source/usage_guides/tracking.md/0
{ "file_path": "accelerate/docs/source/usage_guides/tracking.md", "repo_id": "accelerate", "token_count": 2703 }
3
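As a complement to the wandb-based example in the tracking guide above, here is a hedged sketch of the same `GeneralTracker` interface backed by a plain in-memory dict (no external service); the class name and storage layout are ours, not part of Accelerate.

```python
# Minimal local tracker sketch implementing the documented interface.
from typing import Optional

from accelerate.tracking import GeneralTracker, on_main_process


class DictTracker(GeneralTracker):
    name = "dict"
    requires_logging_directory = False

    @on_main_process
    def __init__(self, run_name: str):
        self.run_name = run_name
        self.store = {"config": {}, "logs": []}

    @property
    def tracker(self):
        return self.store

    @on_main_process
    def store_init_configuration(self, values: dict):
        self.store["config"].update(values)

    @on_main_process
    def log(self, values: dict, step: Optional[int] = None):
        self.store["logs"].append({"step": step, **values})
```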
#!/bin/bash -l

#SBATCH --job-name=multicpu
#SBATCH --nodes=2                    # number of Nodes
#SBATCH --ntasks-per-node=1          # number of MP tasks
#SBATCH --exclusive
#SBATCH --output=O-%x.%j
#SBATCH --error=E-%x.%j

######################
### Set environment ##
######################
source activateEnvironment.sh
######################

#### Set network #####
######################
head_node_ip=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
######################

# Setup env variables for distributed jobs
export MASTER_PORT="${MASTER_PORT:-29555}"
echo "head_node_ip=${head_node_ip}"
echo "MASTER_PORT=${MASTER_PORT}"

INSTANCES_PER_NODE="${INSTANCES_PER_NODE:-1}"

if [[ $SLURM_NNODES == 1 ]] && [[ $INSTANCES_PER_NODE == 1 ]]; then
    export CCL_WORKER_COUNT=0
    LAUNCHER=""
else
    # Setup env variables for distributed jobs
    export CCL_WORKER_COUNT="${CCL_WORKER_COUNT:-2}"
    echo "CCL_WORKER_COUNT=${CCL_WORKER_COUNT}"

    # Write hostfile
    HOSTFILE_PATH=hostfile
    scontrol show hostname $SLURM_JOB_NODELIST | perl -ne 'chomp; print "$_"x1'> ${HOSTFILE_PATH}

    export LAUNCHER="accelerate launch \
        --num_processes $((SLURM_NNODES * ${INSTANCES_PER_NODE})) \
        --num_machines $SLURM_NNODES \
        --rdzv_backend c10d \
        --main_process_ip $head_node_ip \
        --main_process_port $MASTER_PORT \
        --mpirun_hostfile $HOSTFILE_PATH \
        --mpirun_ccl $CCL_WORKER_COUNT"
fi

# This step is necessary because accelerate launch does not handle multiline arguments properly
export ACCELERATE_DIR="${ACCELERATE_DIR:-/accelerate}"
export SCRIPT="${ACCELERATE_DIR}/examples/complete_nlp_example.py"
export SCRIPT_ARGS=" \
    --cpu \
    --output_dir ${ACCELERATE_DIR}/examples/output \
    "

# This step is necessary because accelerate launch does not handle multiline arguments properly
export CMD="$LAUNCHER $SCRIPT $SCRIPT_ARGS"

# Print the command
echo $CMD
echo ""

# Run the command
eval $CMD
accelerate/examples/slurm/submit_multicpu.sh/0
{ "file_path": "accelerate/examples/slurm/submit_multicpu.sh", "repo_id": "accelerate", "token_count": 767 }
4
#!/usr/bin/env python

# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path

from .config_args import default_config_file, load_config_from_file
from .config_utils import SubcommandHelpFormatter


description = "Update an existing config file with the latest defaults while maintaining the old configuration."


def update_config(args):
    """
    Update an existing config file with the latest defaults while maintaining the old configuration.
    """
    config_file = args.config_file
    if config_file is None and Path(default_config_file).exists():
        config_file = default_config_file
    elif not Path(config_file).exists():
        raise ValueError(f"The passed config file located at {config_file} doesn't exist.")
    config = load_config_from_file(config_file)

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    return config_file


def update_command_parser(parser, parents):
    parser = parser.add_parser("update", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to the config file to update. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    parser.set_defaults(func=update_config_command)
    return parser


def update_config_command(args):
    config_file = update_config(args)
    print(f"Successfully updated the configuration file at {config_file}.")
accelerate/src/accelerate/commands/config/update.py/0
{ "file_path": "accelerate/src/accelerate/commands/config/update.py", "repo_id": "accelerate", "token_count": 774 }
5
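The subcommand above is normally reached through the `accelerate config` CLI; a hypothetical programmatic equivalent (the config path is a placeholder and must already exist) looks like:

```python
# Hypothetical programmatic use of update_config from the module above;
# the path would normally come from the CLI argument parser.
from argparse import Namespace

from accelerate.commands.config.update import update_config

updated_path = update_config(Namespace(config_file="default_config.yaml"))
print(f"Rewrote {updated_path} with the latest defaults")
```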
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from types import MethodType from typing import Any, Dict, List, Optional, Tuple, Union from .state import PartialState from .utils import ( calculate_maximum_sizes, convert_bytes, copy_tensor_to_devices, ignorant_find_batch_size, infer_auto_device_map, is_pippy_available, pad_input_tensors, send_to_device, ) def generate_device_map(model, num_processes: int = 1, no_split_module_classes=None, max_memory: dict = None): """ Calculates the device map for `model` with an offset for PiPPy """ if num_processes == 1: return infer_auto_device_map(model, no_split_module_classes=no_split_module_classes, clean_result=False) if max_memory is None: model_size, shared = calculate_maximum_sizes(model) # Split into `n` chunks for each GPU memory = (model_size + shared[0]) / num_processes memory = convert_bytes(memory) value, ending = memory.split(" ") # Add a chunk to deal with potential extra shared memory instances memory = math.ceil(float(value)) * 1.1 memory = f"{memory} {ending}" max_memory = {i: memory for i in range(num_processes)} device_map = infer_auto_device_map( model, max_memory=max_memory, no_split_module_classes=no_split_module_classes, clean_result=False, ) return device_map def find_pippy_batch_size(args, kwargs): found_batch_size = None if args is not None: for arg in args: found_batch_size = ignorant_find_batch_size(arg) if found_batch_size is not None: break if kwargs is not None and found_batch_size is None: for kwarg in kwargs.values(): found_batch_size = ignorant_find_batch_size(kwarg) if found_batch_size is not None: break return found_batch_size def build_pipeline(model, split_points, args, kwargs, num_chunks): """ Attaches the split points to the model based on `self.device_map` and generates a `PipelineStage`. Requires passing in needed `args` and `kwargs` as the model needs on the CPU. Users can pass in custom `num_chunks` as an optional hyper-parameter. 
By default will use `AcceleratorState.num_processes` """ # Note: We import here to reduce import time from general modules, and isolate outside dependencies from torch.distributed.pipelining import ScheduleGPipe, SplitPoint, pipeline # We need to annotate the split points in the model for PiPPy state = PartialState() split_spec = {split_point: SplitPoint.BEGINNING for split_point in split_points} pipe = pipeline( model, mb_args=args, mb_kwargs=kwargs, split_spec=split_spec, ) stage = pipe.build_stage(state.local_process_index, device=state.device) schedule = ScheduleGPipe(stage, num_chunks) return schedule def pippy_forward(forward, num_chunks, gather_output, *args, **kwargs): state = PartialState() output = None if state.num_processes == 1: output = forward(*args, **kwargs) elif state.is_local_main_process: found_batch_size = find_pippy_batch_size(args, kwargs) if found_batch_size is None: raise ValueError("Could not find batch size from args or kwargs") else: if found_batch_size != num_chunks: args = pad_input_tensors(args, found_batch_size, num_chunks) kwargs = pad_input_tensors(kwargs, found_batch_size, num_chunks) forward(*args, **kwargs) elif state.is_last_process: output = forward() else: forward() if gather_output: # Each node will get a copy of the full output which is only on the last GPU output = copy_tensor_to_devices(output) return output def prepare_pippy( model, split_points: Optional[Union[str, List[str]]] = "auto", no_split_module_classes: Optional[List[str]] = None, example_args: Optional[Tuple[Any]] = (), example_kwargs: Optional[Dict[str, Any]] = None, num_chunks: Optional[int] = None, gather_output: Optional[bool] = False, ): """ Wraps `model` for pipeline parallel inference. Args: model (`torch.nn.Module`): A model we want to split for pipeline-parallel inference split_points (`str` or `List[str]`, defaults to 'auto'): How to generate the split points and chunk the model across each GPU. 'auto' will find the best balanced split given any model. Should be a list of layer names in the model to split by otherwise. no_split_module_classes (`List[str]`): A list of class names for layers we don't want to be split. example_args (tuple of model inputs): The expected inputs for the model that uses order-based inputs for a *single process*. Recommended to use this method if possible. example_kwargs (dict of model inputs) The expected inputs for the model that uses dictionary-based inputs for a *single process*. This is a *highly* limiting structure that requires the same keys be present at *all* inference calls. Not recommended unless the prior condition is true for all cases. num_chunks (`int`, defaults to the number of available GPUs): The number of different stages the Pipeline will have. By default it will assign one chunk per GPU, but this can be tuned and played with. In general one should have num_chunks >= num_gpus. gather_output (`bool`, defaults to `False`): If `True`, the output from the last GPU (which holds the true outputs) is sent across to all GPUs. 
""" if not is_pippy_available(): raise ImportError("Using `torch.distributed.pipelining` requires PyTorch 2.4.0 or later.") state = PartialState() example_args = send_to_device(example_args, "cpu") example_kwargs = send_to_device(example_kwargs, "cpu") if num_chunks is None: num_chunks = state.num_processes if split_points == "auto": device_map = generate_device_map(model, num_chunks, no_split_module_classes=no_split_module_classes) split_points = [] for i in range(1, num_chunks): split_points.append(next(k for k, v in device_map.items() if v == i)) model.hf_split_points = split_points stage = build_pipeline(model, split_points, example_args, example_kwargs, num_chunks) model._original_forward = model.forward model._original_call = model.__call__ model.pippy_stage = stage model.hf_split_points = split_points def forward(*args, **kwargs): return pippy_forward(stage.step, num_chunks, gather_output, *args, **kwargs) # To act like a decorator so that it can be popped when doing `extract_model_from_parallel` # Note: creates an infinite recursion loop with `generate` model_forward = MethodType(forward, model) forward.__wrapped__ = model_forward model.forward = forward return model
accelerate/src/accelerate/inference.py/0
{ "file_path": "accelerate/src/accelerate/inference.py", "repo_id": "accelerate", "token_count": 2855 }
6
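A hedged end-to-end sketch of `prepare_pippy` as documented above; the torchvision model and input sizes are placeholders, and it assumes the script is started under a multi-process launcher (e.g. `accelerate launch --num_processes 2`).

```python
# Usage sketch for prepare_pippy (see docstring above for argument details).
import torch
from torchvision.models import resnet50

from accelerate.inference import prepare_pippy

model = resnet50()
example_input = torch.rand(2, 3, 224, 224)

# Split the model across processes; gather_output copies the final output to all ranks
model = prepare_pippy(model, example_args=(example_input,), gather_output=True)

with torch.no_grad():
    output = model(example_input.to("cuda"))
```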
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from torchvision.models import resnet34 from transformers import ( BertConfig, BertForMaskedLM, GPT2Config, GPT2ForSequenceClassification, T5Config, T5ForConditionalGeneration, ) from accelerate import PartialState from accelerate.inference import prepare_pippy from accelerate.utils import DistributedType, send_to_device, set_seed model_to_config = { "t5": (T5ForConditionalGeneration, T5Config, 1024), "bert": (BertForMaskedLM, BertConfig, 512), "gpt2": (GPT2ForSequenceClassification, GPT2Config, 1024), } def get_model_and_data_for_text(model_name, device, num_processes: int = 2): initializer, config, seq_len = model_to_config[model_name] config_args = {} # Eventually needed for batch inference tests on gpt-2 when bs != 1 # if model_name == "gpt2": # config_args["pad_token_id"] = 0 model_config = config(**config_args) model = initializer(model_config) return model, torch.randint( low=0, high=model_config.vocab_size, size=(num_processes, seq_len), device=device, dtype=torch.int64, requires_grad=False, ) def test_gpt2(batch_size: int = 2): set_seed(42) state = PartialState() model, inputs = get_model_and_data_for_text("gpt2", "cpu", batch_size) model = prepare_pippy(model, example_args=(inputs,), no_split_module_classes=model._no_split_modules) # For inference args need to be a tuple inputs = inputs.to("cuda") with torch.no_grad(): output = model(inputs) # Zach: Check that we just grab the real outputs we need at the end if not state.is_last_process: assert output is None, "Output was not generated on just the last process!" else: assert output is not None, "Output was not generated in the last process!" def test_t5(batch_size: int = 2): set_seed(42) state = PartialState() model, inputs = get_model_and_data_for_text("t5", "cpu", batch_size) example_inputs = {"input_ids": inputs, "decoder_input_ids": inputs} model = prepare_pippy( model, no_split_module_classes=model._no_split_modules, example_kwargs=example_inputs, ) # For inference args need to be a tuple inputs = send_to_device(example_inputs, "cuda:0") with torch.no_grad(): output = model(*inputs.values()) # Zach: Check that we just grab the real outputs we need at the end if not state.is_last_process: assert output is None, "Output was not generated on just the last process!" else: assert output is not None, "Output was not generated in the last process!" def test_resnet(batch_size: int = 2): set_seed(42) state = PartialState() model = resnet34() input_tensor = torch.rand(batch_size, 3, 224, 224) model = prepare_pippy( model, example_args=(input_tensor,), ) inputs = send_to_device(input_tensor, "cuda:0") with torch.no_grad(): output = model(inputs) # Zach: Check that we just grab the real outputs we need at the end if not state.is_last_process: assert output is None, "Output was not generated on just the last process!" else: assert output is not None, "Output was not generated in the last process!" 
if __name__ == "__main__": state = PartialState() state.print("Testing pippy integration...") if state.distributed_type == DistributedType.MULTI_GPU: state.print("Testing GPT2...") test_gpt2() # Issue: When modifying the tokenizer for batch GPT2 inference, there's an issue # due to references # NameError: cannot access free variable 'chunk_args_list' where it is not associated with a value in enclosing scope # test_gpt2(3) state.print("Testing T5...") test_t5() test_t5(1) test_t5(3) state.print("Testing CV model...") test_resnet() test_resnet(3) state.destroy_process_group() else: print("Less than two GPUs found, not running tests!")
accelerate/src/accelerate/test_utils/scripts/external_deps/test_pippy.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/scripts/external_deps/test_pippy.py", "repo_id": "accelerate", "token_count": 1745 }
7
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ General namespace and dataclass related classes """ import argparse import copy import enum import functools import os import warnings from contextlib import contextmanager from dataclasses import dataclass, field from datetime import timedelta from typing import Any, Callable, Dict, Iterable, List, Literal, Optional, Tuple, Union, get_args import torch from .constants import FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY from .environment import parse_flag_from_env, str_to_bool from .imports import ( is_cuda_available, is_mlu_available, is_msamp_available, is_npu_available, is_transformer_engine_available, is_xpu_available, ) from .versions import compare_versions class KwargsHandler: """ Internal mixin that implements a `to_kwargs()` method for a dataclass. """ def to_dict(self): return copy.deepcopy(self.__dict__) def to_kwargs(self): """ Returns a dictionary containing the attributes with values different from the default of this class. """ # import clear_environment here to avoid circular import problem from .other import clear_environment with clear_environment(): default_dict = self.__class__().to_dict() this_dict = self.to_dict() return {k: v for k, v in this_dict.items() if default_dict[k] != v} class EnumWithContains(enum.EnumMeta): "A metaclass that adds the ability to check if `self` contains an item with the `in` operator" def __contains__(cls, item): try: cls(item) except ValueError: return False return True class BaseEnum(enum.Enum, metaclass=EnumWithContains): "An enum class that can get the value of an item with `str(Enum.key)`" def __str__(self): return self.value @classmethod def list(cls): "Method to list all the possible items in `cls`" return list(map(str, cls)) @dataclass class AutocastKwargs(KwargsHandler): """ Use this object in your [`Accelerator`] to customize how `torch.autocast` behaves. Please refer to the documentation of this [context manager](https://pytorch.org/docs/stable/amp.html#torch.autocast) for more information on each argument. Example: ```python from accelerate import Accelerator from accelerate.utils import AutocastKwargs kwargs = AutocastKwargs(cache_enabled=True) accelerator = Accelerator(kwargs_handlers=[kwargs]) ``` """ enabled: bool = True cache_enabled: bool = None class DDPCommunicationHookType(BaseEnum): """ Represents a type of communication hook used in DDP. 
Values: - **NO** -- no communication hook - **FP16** -- DDP communication hook to compress the gradients in FP16 - **BF16** -- DDP communication hook to compress the gradients in BF16 - **POWER_SGD** -- DDP communication hook to use PowerSGD - **BATCHED_POWER_SGD** -- DDP communication hook to use batched PowerSGD """ NO = "no" FP16 = "fp16" BF16 = "bf16" POWER_SGD = "power_sgd" BATCHED_POWER_SGD = "batched_power_sgd" @dataclass class DistributedDataParallelKwargs(KwargsHandler): """ Use this object in your [`Accelerator`] to customize how your model is wrapped in a `torch.nn.parallel.DistributedDataParallel`. Please refer to the documentation of this [wrapper](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) for more information on each argument. <Tip warning={true}> `gradient_as_bucket_view` is only available in PyTorch 1.7.0 and later versions. `static_graph` is only available in PyTorch 1.11.0 and later versions. </Tip> Example: ```python from accelerate import Accelerator from accelerate.utils import DistributedDataParallelKwargs kwargs = DistributedDataParallelKwargs(find_unused_parameters=True) accelerator = Accelerator(kwargs_handlers=[kwargs]) ``` """ dim: int = 0 broadcast_buffers: bool = True bucket_cap_mb: int = 25 find_unused_parameters: bool = False check_reduction: bool = False gradient_as_bucket_view: bool = False static_graph: bool = False comm_hook: DDPCommunicationHookType = DDPCommunicationHookType.NO comm_wrapper: Literal[ DDPCommunicationHookType.NO, DDPCommunicationHookType.FP16, DDPCommunicationHookType.BF16 ] = DDPCommunicationHookType.NO comm_state_option: dict = field(default_factory=dict) def to_dict(self, ignore_keys=("comm_hook", "comm_wrapper", "comm_state_option")): return {k: v for k, v in super().to_dict().items() if k not in ignore_keys} def register_comm_hook(self, model): from torch.distributed.algorithms.ddp_comm_hooks import default_hooks, powerSGD_hook hook_map: Dict[DDPCommunicationHookType, Callable] = { DDPCommunicationHookType.FP16: default_hooks.fp16_compress_hook, DDPCommunicationHookType.BF16: default_hooks.bf16_compress_hook, DDPCommunicationHookType.POWER_SGD: powerSGD_hook.powerSGD_hook, DDPCommunicationHookType.BATCHED_POWER_SGD: powerSGD_hook.batched_powerSGD_hook, } wrapper_map: Dict[DDPCommunicationHookType, Callable] = { DDPCommunicationHookType.FP16: default_hooks.fp16_compress_wrapper, DDPCommunicationHookType.BF16: default_hooks.bf16_compress_wrapper, } hook: Optional[Callable] = hook_map.get(self.comm_hook) wrapper: Optional[Callable] = wrapper_map.get(self.comm_wrapper) if hook and wrapper: hook = wrapper(hook) if hook: state = ( powerSGD_hook.PowerSGDState(None, **self.comm_state_option) if self.comm_hook in (DDPCommunicationHookType.POWER_SGD, DDPCommunicationHookType.BATCHED_POWER_SGD) else None ) model.register_comm_hook( state=state, hook=hook, ) @dataclass class GradScalerKwargs(KwargsHandler): """ Use this object in your [`Accelerator`] to customize the behavior of mixed precision, specifically how the `torch.cuda.amp.GradScaler` used is created. Please refer to the documentation of this [scaler](https://pytorch.org/docs/stable/amp.html?highlight=gradscaler) for more information on each argument. <Tip warning={true}> `GradScaler` is only available in PyTorch 1.5.0 and later versions. 
</Tip> Example: ```python from accelerate import Accelerator from accelerate.utils import GradScalerKwargs kwargs = GradScalerKwargs(backoff_filter=0.25) accelerator = Accelerator(kwargs_handlers=[kwargs]) ``` """ init_scale: float = 65536.0 growth_factor: float = 2.0 backoff_factor: float = 0.5 growth_interval: int = 2000 enabled: bool = True @dataclass class InitProcessGroupKwargs(KwargsHandler): """ Use this object in your [`Accelerator`] to customize the initialization of the distributed processes. Please refer to the documentation of this [method](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for more information on each argument. Note: If `timeout` is set to `None`, the default will be based upon how `backend` is set. ```python from datetime import timedelta from accelerate import Accelerator from accelerate.utils import InitProcessGroupKwargs kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=800)) accelerator = Accelerator(kwargs_handlers=[kwargs]) ``` """ backend: Optional[str] = "nccl" init_method: Optional[str] = None timeout: Optional[timedelta] = None def __post_init__(self): if self.timeout is None: seconds = 1800 if self.backend != "nccl" else 600 self.timeout = timedelta(seconds=seconds) # Literals Backend = Literal["MSAMP", "TE"] OptLevel = Literal["O1", "O2"] FP8Format = Literal["E4M3", "HYBRID"] AmaxComputeAlgorithm = Literal["max", "most_recent"] @dataclass class FP8RecipeKwargs(KwargsHandler): """ Use this object in your [`Accelerator`] to customize the initialization of the recipe for FP8 mixed precision training with `transformer-engine` or `ms-amp`. <Tip> For more information on `transformer-engine` args, please refer to the API [documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html). For more information on the `ms-amp` args, please refer to the Optimization Level [documentation](https://azure.github.io/MS-AMP/docs/user-tutorial/optimization-level). </Tip> ```python from accelerate import Accelerator from accelerate.utils import FP8RecipeKwargs kwargs = FP8RecipeKwargs(backend="te", fp8_format="HYBRID") accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[kwargs]) ``` To use MS-AMP as an engine, pass `backend="msamp"` and the `optimization_level`: ```python kwargs = FP8RecipeKwargs(backend="msamp", optimization_level="02") ``` Args: backend (`str`, *optional*): Which FP8 engine to use. Must be one of `"msamp"` (MS-AMP) or `"te"` (TransformerEngine). If not passed, will use whichever is available in the environment, prioritizing MS-AMP. use_autocast_during_eval (`bool`, *optional*, default to `False`): Whether to use FP8 autocast during eval mode. Generally better metrics are found when this is `False`. margin (`int`, *optional*, default to 0): The margin to use for the gradient scaling. interval (`int`, *optional*, default to 1): The interval to use for how often the scaling factor is recomputed. fp8_format (`str`, *optional*, default to "HYBRID"): The format to use for the FP8 recipe. Must be one of `HYBRID` or `E4M3`. (Generally `HYBRID` for training, `E4M3` for evaluation) amax_history_len (`int`, *optional*, default to 1024): The length of the history to use for the scaling factor computation amax_compute_algo (`str`, *optional*, default to "most_recent"): The algorithm to use for the scaling factor computation. Must be one of `max` or `most_recent`. 
override_linear_precision (`tuple` of three `bool`, *optional*, default to `(False, False, False)`): Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision. optimization_level (`str`), one of `O1`, `O2`. (default is `O2`): What level of 8-bit collective communication should be used with MS-AMP. In general: * O1: Weight gradients and `all_reduce` communications are done in fp8, reducing GPU memory usage and communication bandwidth * O2: First-order optimizer states are in 8-bit, and second order states are in FP16. Only available when using Adam or AdamW. This maintains accuracy and can potentially save the highest memory. * 03: Specifically for DeepSpeed, implements capabilities so weights and master weights of models are stored in FP8. If `fp8` is selected and deepspeed is enabled, will be used by default. (Not available currently). """ backend: Backend = None use_autocast_during_eval: bool = None opt_level: OptLevel = None margin: int = None interval: int = None fp8_format: FP8Format = None amax_history_len: int = None amax_compute_algo: AmaxComputeAlgorithm = None override_linear_precision: Tuple[bool, bool, bool] = None def __post_init__(self): env_prefix = "ACCELERATE_FP8_" default_backend = "msamp" if is_msamp_available() else "te" if self.backend is None: self.backend = os.environ.get(env_prefix + "BACKEND", default_backend) self.backend = self.backend.upper() if self.backend not in get_args(Backend): raise ValueError("`backend` must be 'MSAMP' or 'TE' (TransformerEngine).") # Check TE args if self.backend == "TE": if not is_transformer_engine_available(): raise ValueError( "TransformerEngine is not available. Please either install it, or use the 'MSAMP' backend (if installed)." ) if self.use_autocast_during_eval is None: self.use_autocast_during_eval = parse_flag_from_env(env_prefix + "USE_AUTOCAST_DURING_EVAL") if self.margin is None: self.margin = int(os.environ.get(env_prefix + "MARGIN", 0)) if self.interval is None: self.interval = int(os.environ.get(env_prefix + "INTERVAL", 1)) if self.fp8_format is None: self.fp8_format = os.environ.get(env_prefix + "FORMAT", "HYBRID") self.fp8_format = self.fp8_format.upper() if self.fp8_format not in get_args(FP8Format): raise ValueError(f"`fp8_format` must be one of {' or '.join(get_args(FP8Format))}.") if self.amax_compute_algo is None: self.amax_compute_algo = os.environ.get(env_prefix + "AMAX_COMPUTE_ALGO", "most_recent") self.amax_compute_algo = self.amax_compute_algo.lower() if self.amax_compute_algo not in get_args(AmaxComputeAlgorithm): raise ValueError(f"`amax_compute_algo` must be one of {' or '.join(get_args(AmaxComputeAlgorithm))}") if self.amax_history_len is None: self.amax_history_len = int(os.environ.get(env_prefix + "AMAX_HISTORY_LEN", 1024)) if self.override_linear_precision is None: fprop = parse_flag_from_env(env_prefix + "OVERRIDE_FPROP") dgrad = parse_flag_from_env(env_prefix + "OVERRIDE_DGRAD") wgrad = parse_flag_from_env(env_prefix + "OVERRIDE_WGRAD") self.override_linear_precision = (fprop, dgrad, wgrad) elif self.backend == "MSAMP": if not is_msamp_available(): raise ValueError( "MS-AMP is not available. Please either install it, or use the 'TE' backend (if installed)." 
) if self.opt_level is None: self.opt_level = os.environ.get(env_prefix + "OPT_LEVEL", "O2") if self.opt_level not in get_args(OptLevel): raise ValueError(f"`optimization_level` must be one of {' or '.join(get_args(OptLevel))}") # Literal ProfilerActivity = Literal["cpu", "xpu", "mtia", "cuda"] @dataclass class ProfileKwargs(KwargsHandler): """ Use this object in your [`Accelerator`] to customize the initialization of the profiler. Please refer to the documentation of this [context manager](https://pytorch.org/docs/stable/profiler.html#torch.profiler.profile) for more information on each argument. <Tip warning={true}> `torch.profiler` is only available in PyTorch 1.8.1 and later versions. </Tip> Example: ```python from accelerate import Accelerator from accelerate.utils import ProfileKwargs kwargs = ProfileKwargs(activities=["cpu", "cuda"]) accelerator = Accelerator(kwargs_handlers=[kwargs]) ``` Args: activities (`List[str]`, *optional*, default to `None`): The list of activity groups to use in profiling. Must be one of `"cpu"`, `"xpu"`, `"mtia"`, or `"cuda"`. schedule_option (`Dict[str, int]`, *optional*, default to `None`): The schedule option to use for the profiler. Available keys are `wait`, `warmup`, `active`, `repeat` and `skip_first`. The profiler will skip the first `skip_first` steps, then wait for `wait` steps, then do the warmup for the next `warmup` steps, then do the active recording for the next `active` steps and then repeat the cycle starting with `wait` steps. The optional number of cycles is specified with the `repeat` parameter, the zero value means that the cycles will continue until the profiling is finished. on_trace_ready (`Callable`, *optional*, default to `None`): Callable that is called at each step when schedule returns `ProfilerAction.RECORD_AND_SAVE` during the profiling. record_shapes (`bool`, *optional*, default to `False`): Save information about operator’s input shapes. profile_memory (`bool`, *optional*, default to `False`): Track tensor memory allocation/deallocation with_stack (`bool`, *optional*, default to `False`): Record source information (file and line number) for the ops. with_flops (`bool`, *optional*, default to `False`): Use formula to estimate the FLOPS of specific operators with_modules (`bool`, *optional*, default to `False`): Record module hierarchy (including function names) corresponding to the callstack of the op. output_trace_dir (`str`, *optional*, default to `None`): Exports the collected trace in Chrome JSON format. Chrome use 'chrome://tracing' view json file. Defaults to None, which means profiling does not store json files. """ activities: Optional[List[ProfilerActivity]] = None schedule_option: Optional[Dict[str, int]] = None on_trace_ready: Optional[Callable] = None record_shapes: bool = False profile_memory: bool = False with_stack: bool = False with_flops: bool = False with_modules: bool = False output_trace_dir: Optional[str] = None def _get_profiler_activity(self, activity: ProfilerActivity) -> torch.profiler.ProfilerActivity: """Get the profiler activity from the string. Args: activity (str): The profiler activity name. Returns: torch.profiler.ProfilerActivity: The profiler activity. 
""" profiler_activity_map: dict[str, torch.profiler.ProfilerActivity] = { "cpu": torch.profiler.ProfilerActivity.CPU, "xpu": torch.profiler.ProfilerActivity.XPU, "mita": torch.profiler.ProfilerActivity.MTIA, "cuda": torch.profiler.ProfilerActivity.CUDA, } if activity not in profiler_activity_map: raise ValueError(f"Invalid profiler activity: {activity}. Must be one of {list(profiler_activity_map)}.") return profiler_activity_map[activity] def build(self) -> torch.profiler.profile: """ Build a profiler object with the current configuration. Returns: torch.profiler.profile: The profiler object. """ activities: Optional[List[ProfilerActivity]] = None if self.activities is not None: activities = [self._get_profiler_activity(activity) for activity in self.activities] schedule: Optional[torch.profiler.schedule] = None if self.schedule_option is not None: schedule = torch.profiler.schedule(**self.schedule_option) return torch.profiler.profile( activities=activities, schedule=schedule, on_trace_ready=self.on_trace_ready, record_shapes=self.record_shapes, profile_memory=self.profile_memory, with_stack=self.with_stack, with_flops=self.with_flops, with_modules=self.with_modules, ) class DeprecatedFieldDescriptor: """ Descriptor for deprecated fields in an enum class. Args: field_name (`str`): The name of the deprecated field. replaced_with (`str`): The name of the field that replaces the deprecated one. """ def __init__(self, field_name, replaced_with): self.field_name = field_name self.replaced_with = replaced_with def __get__(self, instance, owner): warnings.warn( f"The `{self.field_name}` of `{owner}` is deprecated and will be removed in v1.0.0. " f"Please use the `{self.replaced_with}` instead.", FutureWarning, ) return getattr(owner, self.replaced_with) class DistributedType(str, enum.Enum): """ Represents a type of distributed environment. Values: - **NO** -- Not a distributed environment, just a single process. - **MULTI_CPU** -- Distributed on multiple CPU nodes. - **MULTI_GPU** -- Distributed on multiple GPUs. - **MULTI_MLU** -- Distributed on multiple MLUs. - **MULTI_MUSA** -- Distributed on multiple MUSAs. - **MULTI_NPU** -- Distributed on multiple NPUs. - **MULTI_XPU** -- Distributed on multiple XPUs. - **DEEPSPEED** -- Using DeepSpeed. - **XLA** -- Using TorchXLA. - **TPU** -- This field will be deprecated in v0.27.0. Use XLA instead. """ # Subclassing str as well as Enum allows the `DistributedType` to be JSON-serializable out of the box. NO = "NO" MULTI_CPU = "MULTI_CPU" MULTI_GPU = "MULTI_GPU" MULTI_NPU = "MULTI_NPU" MULTI_MLU = "MULTI_MLU" MULTI_MUSA = "MULTI_MUSA" MULTI_XPU = "MULTI_XPU" DEEPSPEED = "DEEPSPEED" FSDP = "FSDP" XLA = "XLA" MEGATRON_LM = "MEGATRON_LM" TPU = DeprecatedFieldDescriptor("TPU", "XLA") class SageMakerDistributedType(str, enum.Enum): """ Represents a type of distributed environment. Values: - **NO** -- Not a distributed environment, just a single process. - **DATA_PARALLEL** -- using sagemaker distributed data parallelism. - **MODEL_PARALLEL** -- using sagemaker distributed model parallelism. """ # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box. NO = "NO" DATA_PARALLEL = "DATA_PARALLEL" MODEL_PARALLEL = "MODEL_PARALLEL" class FP8BackendType(str, enum.Enum): """ Represents the backend used for FP8. Values: - **TE** -- using TransformerEngine. - **MSAMP** -- using msamp. """ # Subclassing str as well as Enum allows the `FP8BackendType` to be JSON-serializable out of the box. 
TE = "TE" MSAMP = "MSAMP" class ComputeEnvironment(str, enum.Enum): """ Represents a type of the compute environment. Values: - **LOCAL_MACHINE** -- private/custom cluster hardware. - **AMAZON_SAGEMAKER** -- Amazon SageMaker as compute environment. """ # Subclassing str as well as Enum allows the `ComputeEnvironment` to be JSON-serializable out of the box. LOCAL_MACHINE = "LOCAL_MACHINE" AMAZON_SAGEMAKER = "AMAZON_SAGEMAKER" class DynamoBackend(str, BaseEnum): """ Represents a dynamo backend (see https://pytorch.org/docs/stable/torch.compiler.html). Values: - **NO** -- Do not use torch dynamo. - **EAGER** -- Uses PyTorch to run the extracted GraphModule. This is quite useful in debugging TorchDynamo issues. - **AOT_EAGER** -- Uses AotAutograd with no compiler, i.e, just using PyTorch eager for the AotAutograd's extracted forward and backward graphs. This is useful for debugging, and unlikely to give speedups. - **INDUCTOR** -- Uses TorchInductor backend with AotAutograd and cudagraphs by leveraging codegened Triton kernels. [Read more](https://dev-discuss.pytorch.org/t/torchinductor-a-pytorch-native-compiler-with-define-by-run-ir-and-symbolic-shapes/747) - **AOT_TS_NVFUSER** -- nvFuser with AotAutograd/TorchScript. [Read more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593) - **NVPRIMS_NVFUSER** -- nvFuser with PrimTorch. [Read more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593) - **CUDAGRAPHS** -- cudagraphs with AotAutograd. [Read more](https://github.com/pytorch/torchdynamo/pull/757) - **OFI** -- Uses Torchscript optimize_for_inference. Inference only. [Read more](https://pytorch.org/docs/stable/generated/torch.jit.optimize_for_inference.html) - **FX2TRT** -- Uses Nvidia TensorRT for inference optimizations. Inference only. [Read more](https://github.com/pytorch/TensorRT/blob/master/docsrc/tutorials/getting_started_with_fx_path.rst) - **ONNXRT** -- Uses ONNXRT for inference on CPU/GPU. Inference only. [Read more](https://onnxruntime.ai/) - **TENSORRT** -- Uses ONNXRT to run TensorRT for inference optimizations. [Read more](https://github.com/onnx/onnx-tensorrt) - **AOT_TORCHXLA_TRACE_ONCE** -- Uses Pytorch/XLA with TorchDynamo optimization, for training. [Read more](https://github.com/pytorch/xla/blob/r2.0/docs/dynamo.md) - **TORCHXLA_TRACE_ONCE** -- Uses Pytorch/XLA with TorchDynamo optimization, for inference. [Read more](https://github.com/pytorch/xla/blob/r2.0/docs/dynamo.md) - **IPEX** -- Uses IPEX for inference on CPU. Inference only. [Read more](https://github.com/intel/intel-extension-for-pytorch). - **TVM** -- Uses Apach TVM for inference optimizations. [Read more](https://tvm.apache.org/) """ # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box. 
NO = "NO" EAGER = "EAGER" AOT_EAGER = "AOT_EAGER" INDUCTOR = "INDUCTOR" AOT_TS_NVFUSER = "AOT_TS_NVFUSER" NVPRIMS_NVFUSER = "NVPRIMS_NVFUSER" CUDAGRAPHS = "CUDAGRAPHS" OFI = "OFI" FX2TRT = "FX2TRT" ONNXRT = "ONNXRT" TENSORRT = "TENSORRT" AOT_TORCHXLA_TRACE_ONCE = "AOT_TORCHXLA_TRACE_ONCE" TORCHXLA_TRACE_ONCE = "TORCHXLA_TRACE_ONCE" IPEX = "IPEX" TVM = "TVM" class LoggerType(BaseEnum): """Represents a type of supported experiment tracker Values: - **ALL** -- all available trackers in the environment that are supported - **TENSORBOARD** -- TensorBoard as an experiment tracker - **WANDB** -- wandb as an experiment tracker - **COMETML** -- comet_ml as an experiment tracker - **DVCLIVE** -- dvclive as an experiment tracker """ ALL = "all" AIM = "aim" TENSORBOARD = "tensorboard" WANDB = "wandb" COMETML = "comet_ml" MLFLOW = "mlflow" CLEARML = "clearml" DVCLIVE = "dvclive" class PrecisionType(str, BaseEnum): """Represents a type of precision used on floating point values Values: - **NO** -- using full precision (FP32) - **FP16** -- using half precision - **BF16** -- using brain floating point precision """ NO = "no" FP8 = "fp8" FP16 = "fp16" BF16 = "bf16" class RNGType(BaseEnum): TORCH = "torch" CUDA = "cuda" MLU = "mlu" MUSA = "musa" NPU = "npu" XLA = "xla" XPU = "xpu" GENERATOR = "generator" class CustomDtype(enum.Enum): r""" An enum that contains multiple custom dtypes that can be used for `infer_auto_device_map`. """ FP8 = "fp8" INT4 = "int4" INT2 = "int2" # data classes @dataclass class TensorInformation: shape: torch.Size dtype: torch.dtype @dataclass class DataLoaderConfiguration: """ Configuration for dataloader-related items when calling `accelerator.prepare`. """ split_batches: bool = field( default=False, metadata={ "help": "Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If" " `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a" " round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set" " in your script multiplied by the number of processes." }, ) dispatch_batches: bool = field( default=None, metadata={ "help": "If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process" " and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose" " underlying dataset is an `IterableDataset`, `False` otherwise." }, ) even_batches: bool = field( default=True, metadata={ "help": "If set to `True`, in cases where the total batch size across all processes does not exactly divide the" " dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among" " all workers." }, ) use_seedable_sampler: bool = field( default=False, metadata={ "help": "Whether or not use a fully seedable random sampler ([`data_loader.SeedableRandomSampler`])." "Ensures training results are fully reproducable using a different sampling technique. " "While seed-to-seed results may differ, on average the differences are neglible when using" "multiple different seeds to compare. Should also be ran with [`~utils.set_seed`] for the best results." }, ) non_blocking: bool = field( default=False, metadata={ "help": "If set to `True`, the dataloader prepared by the Accelerator will utilize non-blocking host-to-device" " transfers, allowing for better overlap between dataloader communication and computation. 
Recommended that the" " prepared dataloader has `pin_memory` set to `True` to work properly." }, ) use_stateful_dataloader: bool = field( default=False, metadata={ "help": "If set to `True`, the dataloader prepared by the Accelerator will be backed by " "[torchdata.StatefulDataLoader](https://github.com/pytorch/data/tree/main/torchdata/stateful_dataloader). This requires `torchdata` version 0.8.0 or higher that supports StatefulDataLoader to be installed." }, ) @dataclass class ProjectConfiguration: """ Configuration for the Accelerator object based on inner-project needs. """ project_dir: str = field(default=None, metadata={"help": "A path to a directory for storing data."}) logging_dir: str = field( default=None, metadata={ "help": "A path to a directory for storing logs of locally-compatible loggers. If None, defaults to `project_dir`." }, ) automatic_checkpoint_naming: bool = field( default=False, metadata={"help": "Whether saved states should be automatically iteratively named."}, ) total_limit: int = field( default=None, metadata={"help": "The maximum number of total saved states to keep."}, ) iteration: int = field( default=0, metadata={"help": "The current save iteration."}, ) save_on_each_node: bool = field( default=False, metadata={ "help": ( "When doing multi-node distributed training, whether to save models and checkpoints on each node, or" " only on the main one" ) }, ) def set_directories(self, project_dir: str = None): "Sets `self.project_dir` and `self.logging_dir` to the appropriate values." self.project_dir = project_dir if self.logging_dir is None: self.logging_dir = project_dir def __post_init__(self): self.set_directories(self.project_dir) @dataclass class GradientAccumulationPlugin(KwargsHandler): """ A plugin to configure gradient accumulation behavior. You can only pass one of `gradient_accumulation_plugin` or `gradient_accumulation_steps` to [`Accelerator`]. Passing both raises an error. Parameters: num_steps (`int`): The number of steps to accumulate gradients for. adjust_scheduler (`bool`, *optional*, defaults to `True`): Whether to adjust the scheduler steps to account for the number of steps being accumulated. Should be `True` if the used scheduler was not adjusted for gradient accumulation. sync_with_dataloader (`bool`, *optional*, defaults to `True`): Whether to synchronize setting the gradients when at the end of the dataloader. sync_each_batch (`bool`, *optional*): Whether to synchronize setting the gradients at each data batch. Seting to `True` may reduce memory requirements when using gradient accumulation with distributed training, at expense of speed. Example: ```python from accelerate.utils import GradientAccumulationPlugin gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2) accelerator = Accelerator(gradient_accumulation_plugin=gradient_accumulation_plugin) ``` """ num_steps: int = field(default=None, metadata={"help": "The number of steps to accumulate gradients for."}) adjust_scheduler: bool = field( default=True, metadata={ "help": "Whether to adjust the scheduler steps to account for the number of steps being accumulated. Should be `True` if the used scheduler was not adjusted for gradient accumulation." }, ) sync_with_dataloader: bool = field( default=True, metadata={ "help": "Whether to synchronize setting the gradients when at the end of the dataloader. Should only be set to `False` if you know what you're doing." 
}, ) sync_each_batch: bool = field( default=False, metadata={ "help": "Whether to synchronize setting the gradients at each data batch. Setting to `True` may reduce memory requirements when using gradient accumulation with distributed training, at expense of speed." }, ) @dataclass class TorchDynamoPlugin(KwargsHandler): """ This plugin is used to compile a model with PyTorch 2.0 """ backend: DynamoBackend = field( default=None, metadata={"help": f"Possible options are {[b.value.lower() for b in DynamoBackend]}"}, ) mode: str = field( default=None, metadata={"help": "Possible options are 'default', 'reduce-overhead' or 'max-autotune'"} ) fullgraph: bool = field(default=None, metadata={"help": "Whether it is ok to break model into several subgraphs"}) dynamic: bool = field(default=None, metadata={"help": "Whether to use dynamic shape for tracing"}) options: Any = field(default=None, metadata={"help": "A dictionary of options to pass to the backend."}) disable: bool = field(default=False, metadata={"help": "Turn torch.compile() into a no-op for testing"}) def __post_init__(self): prefix = "ACCELERATE_DYNAMO_" if self.backend is None: self.backend = os.environ.get(prefix + "BACKEND", "no") self.backend = DynamoBackend(self.backend.upper()) if self.mode is None: self.mode = os.environ.get(prefix + "MODE", "default") if self.fullgraph is None: self.fullgraph = str_to_bool(os.environ.get(prefix + "USE_FULLGRAPH", "False")) == 1 if self.dynamic is None: self.dynamic = str_to_bool(os.environ.get(prefix + "USE_DYNAMIC", "False")) == 1 def to_dict(self): dynamo_config = copy.deepcopy(self.__dict__) dynamo_config["backend"] = dynamo_config["backend"].value.lower() return dynamo_config @dataclass class DeepSpeedPlugin: """ This plugin is used to integrate DeepSpeed. """ hf_ds_config: Any = field( default=None, metadata={ "help": "path to DeepSpeed config file or dict or an object of class `accelerate.utils.deepspeed.HfDeepSpeedConfig`." }, ) gradient_accumulation_steps: int = field( default=None, metadata={ "help": "Number of steps to accumulate gradients before updating optimizer states. If not set, will use the value from the `Accelerator` directly." }, ) gradient_clipping: float = field(default=None, metadata={"help": "Enable gradient clipping with value"}) zero_stage: int = field( default=None, metadata={"help": "Possible options are 0,1,2,3; Default will be taken from environment variable"}, ) is_train_batch_min: bool = field( default=True, metadata={"help": "If both train & eval dataloaders are specified, this will decide the train_batch_size"}, ) offload_optimizer_device: str = field( default=None, metadata={"help": "Possible options are none|cpu|nvme. Only applicable with ZeRO Stages 2 and 3."}, ) offload_param_device: str = field( default=None, metadata={"help": "Possible options are none|cpu|nvme. Only applicable with ZeRO Stage 3."}, ) offload_optimizer_nvme_path: str = field( default=None, metadata={"help": "Possible options are /nvme|/local_nvme. Only applicable with ZeRO Stage 3."}, ) offload_param_nvme_path: str = field( default=None, metadata={"help": "Possible options are /nvme|/local_nvme. Only applicable with ZeRO Stage 3."}, ) zero3_init_flag: bool = field( default=None, metadata={ "help": "Flag to indicate whether to enable `deepspeed.zero.Init` for constructing massive models." "Only applicable with ZeRO Stage-3." }, ) zero3_save_16bit_model: bool = field( default=None, metadata={"help": "Flag to indicate whether to save 16-bit model. 
Only applicable with ZeRO Stage-3."}, ) transformer_moe_cls_names: str = field( default=None, metadata={ "help": "comma-separated list of transformers MoE layer class names (case-sensitive), e.g : " " `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ..." }, ) def __post_init__(self): from .deepspeed import HfDeepSpeedConfig if self.gradient_accumulation_steps is None: gas = os.environ.get("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", "auto") self.gradient_accumulation_steps = int(gas) if gas.isdigit() else gas if self.gradient_clipping is None: gradient_clipping = os.environ.get("ACCELERATE_GRADIENT_CLIPPING", "auto") self.gradient_clipping = gradient_clipping if gradient_clipping == "auto" else float(gradient_clipping) if self.zero_stage is None: self.zero_stage = int(os.environ.get("ACCELERATE_DEEPSPEED_ZERO_STAGE", 2)) if self.offload_optimizer_device is None: self.offload_optimizer_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE", "none") if self.offload_param_device is None: self.offload_param_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE", "none") if self.offload_optimizer_nvme_path is None: self.offload_optimizer_nvme_path = os.environ.get( "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_NVME_PATH", "none" ) if self.offload_param_nvme_path is None: self.offload_param_nvme_path = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_NVME_PATH", "none") if self.zero3_save_16bit_model is None: self.zero3_save_16bit_model = ( os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL", "false") == "true" ) if self.hf_ds_config is None: self.hf_ds_config = os.environ.get("ACCELERATE_DEEPSPEED_CONFIG_FILE", "none") if ( isinstance(self.hf_ds_config, dict) or (isinstance(self.hf_ds_config, str) and self.hf_ds_config != "none") or isinstance(self.hf_ds_config, HfDeepSpeedConfig) ): if not isinstance(self.hf_ds_config, HfDeepSpeedConfig): self.hf_ds_config = HfDeepSpeedConfig(self.hf_ds_config) if "gradient_accumulation_steps" not in self.hf_ds_config.config: self.hf_ds_config.config["gradient_accumulation_steps"] = 1 if "zero_optimization" not in self.hf_ds_config.config: raise ValueError("Please specify the ZeRO optimization config in the DeepSpeed config.") self._deepspeed_config_checks() plugin_to_config_mapping = { "gradient_accumulation_steps": "gradient_accumulation_steps", "gradient_clipping": "gradient_clipping", "zero_stage": "zero_optimization.stage", "offload_optimizer_device": "zero_optimization.offload_optimizer.device", "offload_param_device": "zero_optimization.offload_param.device", "offload_param_nvme_path": "zero_optimization.offload_param.nvme_path", "offload_optimizer_nvme_path": "zero_optimization.offload_optimizer.nvme_path", "zero3_save_16bit_model": "zero_optimization.stage3_gather_16bit_weights_on_model_save", } kwargs = {v: getattr(self, k) for k, v in plugin_to_config_mapping.items() if getattr(self, k) is not None} for key in kwargs.keys(): self.fill_match(key, **kwargs, must_match=False) self.hf_ds_config.set_stage_and_offload() # filling the missing values in the class attributes from the DeepSpeed config # when using the DeepSpeed config file. 
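            # e.g. a user config file containing `"zero_optimization": {"stage": 3}` results in
            # `self.zero_stage` being overwritten with 3 by the loop below, while values left as "auto"
            # keep the plugin/environment defaults resolved above.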
for key, value in plugin_to_config_mapping.items(): config_value = self.hf_ds_config.get_value(value) if config_value is not None and config_value != "auto": setattr(self, key, config_value) else: config = { "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "gradient_accumulation_steps": self.gradient_accumulation_steps, "zero_optimization": { "stage": self.zero_stage, "offload_optimizer": { "device": self.offload_optimizer_device, "nvme_path": self.offload_optimizer_nvme_path if self.offload_optimizer_device == "nvme" else None, }, "offload_param": { "device": self.offload_param_device, "nvme_path": self.offload_param_nvme_path if self.offload_param_device == "nvme" else None, }, "stage3_gather_16bit_weights_on_model_save": self.zero3_save_16bit_model, }, } if self.gradient_clipping: config["gradient_clipping"] = self.gradient_clipping self.hf_ds_config = HfDeepSpeedConfig(config) self.deepspeed_config = self.hf_ds_config.config self.deepspeed_config["steps_per_print"] = float("inf") # this will stop deepspeed from logging @ stdout if self.zero3_init_flag is None: self.zero3_init_flag = ( str_to_bool(os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_INIT", str(self.hf_ds_config.is_zero3()))) == 1 ) if self.zero3_init_flag and not self.hf_ds_config.is_zero3(): warnings.warn("DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.") self.zero3_init_flag = False def fill_match(self, ds_key_long, mismatches=None, must_match=True, **kwargs): mismatches = [] if mismatches is None else mismatches config, ds_key = self.hf_ds_config.find_config_node(ds_key_long) if config is None: return if config.get(ds_key) == "auto": if ds_key_long in kwargs: config[ds_key] = kwargs[ds_key_long] return else: raise ValueError( f"`{ds_key_long}` not found in kwargs. " f"Please specify `{ds_key_long}` without `auto` (set to correct value) in the DeepSpeed config file or " "pass it in kwargs." ) if not must_match: return ds_val = config.get(ds_key) if ds_val is not None and ds_key_long in kwargs: if ds_val != kwargs[ds_key_long]: mismatches.append(f"- ds {ds_key_long}={ds_val} vs arg {ds_key_long}={kwargs[ds_key_long]}") def is_auto(self, ds_key_long): val = self.hf_ds_config.get_value(ds_key_long) if val is None: return False else: return val == "auto" def get_value(self, ds_key_long, default=None): return self.hf_ds_config.get_value(ds_key_long, default) def deepspeed_config_process(self, prefix="", mismatches=None, config=None, must_match=True, **kwargs): """Process the DeepSpeed config with the values from the kwargs.""" mismatches = [] if mismatches is None else mismatches if config is None: config = self.deepspeed_config for key, value in config.items(): if isinstance(value, dict): self.deepspeed_config_process( prefix=prefix + key + ".", mismatches=mismatches, config=value, must_match=must_match, **kwargs ) else: self.fill_match(prefix + key, mismatches, must_match=must_match, **kwargs) if len(mismatches) > 0 and prefix == "": mismatches_msg = "\n".join(mismatches) raise ValueError( "Please correct the following DeepSpeed config values that mismatch kwargs " f" values:\n{mismatches_msg}\nThe easiest method is to set these DeepSpeed config values to 'auto'." 
) def set_mixed_precision(self, mixed_precision): ds_config = self.deepspeed_config kwargs = { "fp16.enabled": mixed_precision == "fp16", # When training in fp8, we still rely on bf16 autocast for the core mixed precision "bf16.enabled": mixed_precision in ("bf16", "fp8"), } if mixed_precision == "fp16": if "fp16" not in ds_config: ds_config["fp16"] = {"enabled": True, "auto_cast": True} elif mixed_precision in ("bf16", "fp8"): if "bf16" not in ds_config: ds_config["bf16"] = {"enabled": True} if mixed_precision != "no": diff_dtype = "bf16" if mixed_precision == "fp16" else "fp16" if str(ds_config.get(diff_dtype, {}).get("enabled", "False")).lower() == "true": raise ValueError( f"`--mixed_precision` arg cannot be set to `{mixed_precision}` when `{diff_dtype}` is set in the DeepSpeed config file." ) for dtype in ["fp16", "bf16"]: if dtype not in ds_config: ds_config[dtype] = {"enabled": False} self.fill_match("fp16.enabled", must_match=False, **kwargs) self.fill_match("bf16.enabled", must_match=False, **kwargs) def set_deepspeed_weakref(self): from .imports import is_transformers_available if self.zero3_init_flag: if not is_transformers_available(): raise Exception( "When `zero3_init_flag` is set, it requires Transformers to be installed. " "Please run `pip install transformers`." ) ds_config = copy.deepcopy(self.deepspeed_config) if "gradient_accumulation_steps" not in ds_config or ds_config["gradient_accumulation_steps"] == "auto": ds_config["gradient_accumulation_steps"] = 1 if ( "train_micro_batch_size_per_gpu" not in ds_config or ds_config["train_micro_batch_size_per_gpu"] == "auto" ): ds_config["train_micro_batch_size_per_gpu"] = 1 if ds_config.get("train_batch_size", None) == "auto": del ds_config["train_batch_size"] if compare_versions("transformers", "<", "4.33"): from transformers.deepspeed import HfDeepSpeedConfig else: from transformers.integrations import HfDeepSpeedConfig self.dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # noqa def is_zero3_init_enabled(self): return self.zero3_init_flag @contextmanager def zero3_init_context_manager(self, enable=False): old = self.zero3_init_flag if old == enable: yield else: self.zero3_init_flag = enable self.dschf = None self.set_deepspeed_weakref() yield self.zero3_init_flag = old self.dschf = None self.set_deepspeed_weakref() def _deepspeed_config_checks(self): env_variable_names_to_ignore = [ "ACCELERATE_GRADIENT_ACCUMULATION_STEPS", "ACCELERATE_GRADIENT_CLIPPING", "ACCELERATE_DEEPSPEED_ZERO_STAGE", "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE", "ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE", "ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_NVME_PATH", "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_NVME_PATH", "ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL", "ACCELERATE_MIXED_PRECISION", ] env_variable_names_to_ignore = [ name.replace("ACCELERATE_", "").replace("DEEPSPEED_", "").lower() for name in env_variable_names_to_ignore ] deepspeed_fields_from_accelerate_config = os.environ.get("ACCELERATE_CONFIG_DS_FIELDS", "").split(",") if any(name in env_variable_names_to_ignore for name in deepspeed_fields_from_accelerate_config): raise ValueError( f"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\n" "Please specify them appropriately in the DeepSpeed config file.\n" "If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n" "The easiest method is to create a new config following the questionnaire via `accelerate 
config`.\n" "It will only ask for the necessary config variables when using `deepspeed_config_file`." ) def set_moe_leaf_modules(self, model): if self.transformer_moe_cls_names is None: self.transformer_moe_cls_names = os.environ.get("ACCELERATE_DEEPSPEED_MOE_LAYER_CLS_NAMES", None) if self.transformer_moe_cls_names is not None: if compare_versions("deepspeed", "<", "0.14.0"): raise ImportError("DeepSpeed version must be >= 0.14.0 to use MOE support. Please update DeepSpeed.") from deepspeed.utils import set_z3_leaf_modules class_names = self.transformer_moe_cls_names.split(",") transformer_moe_cls = [] for layer_class in class_names: transformer_cls = get_module_class_from_name(model, layer_class) if transformer_cls is None: raise Exception( f"Could not find a transformer layer class called '{layer_class}' to wrap in the model." ) else: transformer_moe_cls.append(transformer_cls) set_z3_leaf_modules(model, transformer_moe_cls) # z3_leaf @dataclass class FullyShardedDataParallelPlugin: """ This plugin is used to enable fully sharded data parallelism. """ sharding_strategy: Union[str, "torch.distributed.fsdp.ShardingStrategy"] = field( default=None, metadata={ "help": "Sharding strategy to use. Should be either a `str` or an instance of `torch.distributed.fsdp.fully_sharded_data_parallel.ShardingStrategy`. Defaults to 'FULL_SHARD'" }, ) backward_prefetch: Union[str, "torch.distributed.fsdp.BackwardPrefetch"] = field( default=None, metadata={ "help": "Backward prefetch strategy to use. Should be either a `str` or an instance of `torch.distributed.fsdp.fully_sharded_data_parallel.BackwardPrefetch`. Defaults to 'NO_PREFETCH'" }, ) mixed_precision_policy: Optional[Union[dict, "torch.distributed.fsdp.MixedPrecision"]] = field( default=None, metadata={ "help": "A config to enable mixed precision training with FullyShardedDataParallel. " "If passing in a `dict`, it should have the following keys: `param_dtype`, `reduce_dtype`, and `buffer_dtype`." }, ) auto_wrap_policy: Optional[ Union[Callable, Literal["transformer_based_wrap", "size_based_wrap", "no_wrap"]] ] = field( default=None, metadata={ "help": "A callable or string specifying a policy to recursively wrap layers with FSDP. If a string, it must be one of `transformer_based_wrap`, `size_based_wrap`, or `no_wrap`. " "Defaults to `NO_WRAP`. See `torch.distributed.fsdp.wrap.size_based_wrap_policy` for a direction on what it should look like" }, ) cpu_offload: Union[bool, "torch.distributed.fsdp.CPUOffload"] = field( default=None, metadata={ "help": "Whether to offload parameters to CPU. Should be either a `bool` or an instance of `torch.distributed.fsdp.fully_sharded_data_parallel.CPUOffload`. Defaults to `False`" }, ) ignored_modules: Optional[Iterable[torch.nn.Module]] = field( default=None, metadata={"help": "A list of modules to ignore when wrapping with FSDP."}, ) state_dict_type: Union[str, "torch.distributed.fsdp.StateDictType"] = field( default=None, metadata={ "help": "State dict type to use. If a string, it must be one of `full_state_dict`, `local_state_dict`, or `sharded_state_dict`. Defaults to `FULL_STATE_DICT`" }, ) state_dict_config: Optional[ Union[ "torch.distributed.fsdp.FullStateDictConfig", "torch.distributed.fsdp.ShardedStateDictConfig", ] ] = field( default=None, metadata={"help": "State dict config to use. 
Is determined based on the `state_dict_type` if not passed in."}, ) optim_state_dict_config: Optional[ Union["torch.distributed.fsdp.FullOptimStateDictConfig", "torch.distributed.fsdp.ShardedOptimStateDictConfig"] ] = field( default=None, metadata={ "help": "Optim state dict config to use. Is determined based on the `state_dict_type` if not passed in." }, ) limit_all_gathers: bool = field( default=True, metadata={ "help": "Whether to have FSDP explicitly synchronizes the CPU thread to prevent " "too many in-flight all-gathers. This bool only affects the sharded strategies that schedule all-gathers. " "Enabling this can help lower the number of CUDA malloc retries." }, ) use_orig_params: bool = field( default=None, metadata={"help": "Whether to use the original parameters for the optimizer. Defaults to `False`"}, ) param_init_fn: Optional[Callable[[torch.nn.Module], None]] = field( default=None, metadata={ "help": "A Callable[torch.nn.Module] -> None that specifies how modules " "that are currently on the meta device should be initialized onto an actual device. " "Only applicable when `sync_module_states` is `True`. By default is a `lambda` which calls `to_empty` on the module." }, ) sync_module_states: bool = field( default=None, metadata={ "help": "Whether each individually wrapped FSDP unit should broadcast module parameters from rank 0 " "to ensure they are the same across all ranks after initialization. Defaults to `False` unless " "`cpu_ram_efficient_loading` is `True`, then will be forcibly enabled." }, ) forward_prefetch: bool = field( default=None, metadata={ "help": "Whether to have FSDP explicitly prefetches the next upcoming " "all-gather while executing in the forward pass. only use with Static graphs. Defaults to `False`" }, ) activation_checkpointing: bool = field( default=None, metadata={ "help": "A technique to reduce memory usage by clearing activations of " "certain layers and recomputing them during a backward pass. Effectively, this trades extra computation time " "for reduced memory usage. Defaults to `False`" }, ) cpu_ram_efficient_loading: bool = field( default=None, metadata={ "help": "If True, only the first process loads the pretrained model checkoint while all other processes have empty weights. " "Only applicable for 🤗 Transformers. When using this, `sync_module_states` needs to be `True`. Defaults to `False`." }, ) transformer_cls_names_to_wrap: Optional[List[str]] = field( default=None, metadata={ "help": "A list of transformer layer class names to wrap. Only applicable when `auto_wrap_policy` is `transformer_based_wrap`." }, ) min_num_params: Optional[int] = field( default=None, metadata={ "help": "The minimum number of parameters a module must have to be wrapped. Only applicable when `auto_wrap_policy` is `size_based_wrap`." 
}, ) def __post_init__(self): from torch.distributed.fsdp import ( BackwardPrefetch, CPUOffload, ShardingStrategy, ) env_prefix = "FSDP_" # Strategy: By default we should always assume that values are passed in, else we check the environment variables if self.sharding_strategy is None: self.sharding_strategy = os.environ.get(env_prefix + "SHARDING_STRATEGY", "FULL_SHARD") if isinstance(self.sharding_strategy, str): # We need to remap based on custom enum values for user readability if self.sharding_strategy.upper() in FSDP_SHARDING_STRATEGY: self.sharding_strategy = FSDP_SHARDING_STRATEGY.index(self.sharding_strategy.upper()) + 1 if isinstance(self.sharding_strategy, int) or self.sharding_strategy.isdigit(): self.sharding_strategy = ShardingStrategy(int(self.sharding_strategy)) else: self.sharding_strategy = ShardingStrategy[self.sharding_strategy.upper()] if self.cpu_offload is None: self.cpu_offload = str_to_bool(os.environ.get(env_prefix + "OFFLOAD_PARAMS", "False")) == 1 if isinstance(self.cpu_offload, bool): self.cpu_offload = CPUOffload(offload_params=self.cpu_offload) if self.backward_prefetch is None: self.backward_prefetch = os.environ.get(env_prefix + "BACKWARD_PREFETCH", None) if isinstance(self.backward_prefetch, str) and self.backward_prefetch.upper() == "NO_PREFETCH": self.backward_prefetch = None if self.backward_prefetch is not None and not isinstance(self.backward_prefetch, BackwardPrefetch): if isinstance(self.backward_prefetch, str) and self.backward_prefetch.upper() in FSDP_BACKWARD_PREFETCH: self.backward_prefetch = FSDP_BACKWARD_PREFETCH.index(self.backward_prefetch.upper()) + 1 if isinstance(self.backward_prefetch, int) or self.backward_prefetch.isdigit(): self.backward_prefetch = BackwardPrefetch(int(self.backward_prefetch)) else: self.backward_prefetch = BackwardPrefetch[self.backward_prefetch.upper()] self.set_state_dict_type() if self.auto_wrap_policy is None: self.auto_wrap_policy = os.environ.get(env_prefix + "AUTO_WRAP_POLICY", "NO_WRAP") if isinstance(self.auto_wrap_policy, str): if self.auto_wrap_policy.upper() not in FSDP_AUTO_WRAP_POLICY: raise ValueError( f"Invalid auto wrap policy: {self.auto_wrap_policy}. Must be one of {list(FSDP_AUTO_WRAP_POLICY.keys())}" ) from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy if self.auto_wrap_policy.upper() == "TRANSFORMER_BASED_WRAP": self.auto_wrap_policy = transformer_auto_wrap_policy if self.transformer_cls_names_to_wrap is None: self.transformer_cls_names_to_wrap = os.environ.get(env_prefix + "TRANSFORMER_CLS_TO_WRAP", None) if isinstance(self.transformer_cls_names_to_wrap, str): self.transformer_cls_names_to_wrap = self.transformer_cls_names_to_wrap.split(",") elif self.auto_wrap_policy.upper() == "SIZE_BASED_WRAP": self.auto_wrap_policy = size_based_auto_wrap_policy if self.min_num_params is None: self.min_num_params = int(os.environ.get(env_prefix + "MIN_NUM_PARAMS", 0)) elif not isinstance(self.min_num_params, int): raise ValueError( f"`min_num_params` must be an integer. 
Got {self.min_num_params} of type {type(self.min_num_params)}" ) elif self.auto_wrap_policy.upper() == "NO_WRAP": self.auto_wrap_policy = None if self.use_orig_params is None: self.use_orig_params = str_to_bool(os.environ.get(env_prefix + "USE_ORIG_PARAMS", "False")) == 1 if self.sync_module_states is None: self.sync_module_states = str_to_bool(os.environ.get(env_prefix + "SYNC_MODULE_STATES", "False")) == 1 if self.forward_prefetch is None: self.forward_prefetch = str_to_bool(os.environ.get(env_prefix + "FORWARD_PREFETCH", "False")) == 1 if self.activation_checkpointing is None: self.activation_checkpointing = ( str_to_bool(os.environ.get(env_prefix + "ACTIVATION_CHECKPOINTING", "False")) == 1 ) if self.cpu_ram_efficient_loading is None: self.cpu_ram_efficient_loading = ( str_to_bool(os.environ.get(env_prefix + "CPU_RAM_EFFICIENT_LOADING", "False")) == 1 ) if self.cpu_ram_efficient_loading and not self.sync_module_states: warnings.warn( "sync_module_states cannot be False since efficient cpu ram loading enabled. " "Setting sync_module_states to True." ) self.sync_module_states = True if isinstance(self.mixed_precision_policy, dict): self.set_mixed_precision(self.mixed_precision_policy) if self.sync_module_states: if is_npu_available(): device = torch.npu.current_device() elif is_mlu_available(): device = torch.mlu.current_device() elif is_cuda_available(): device = torch.cuda.current_device() elif is_xpu_available(): device = torch.xpu.current_device() else: raise RuntimeError( "There are currently no available devices found, must be one of 'XPU', 'CUDA', or 'NPU'." ) # Create a function that will be used to initialize the parameters of the model # when using `sync_module_states` self.param_init_fn = lambda x: x.to_empty(device=device, recurse=False) def set_state_dict_type(self): """ Set the state dict config based on the `StateDictType. 
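
        Accepts the enum member name (e.g. `"FULL_STATE_DICT"`, case-insensitive) or its integer value, and
        fills in matching state dict / optimizer state dict configs when they were not provided.

        Example (illustrative sketch, not from the upstream docs; assumes `torch.distributed.fsdp` is importable):

        ```python
        plugin = FullyShardedDataParallelPlugin(state_dict_type="sharded_state_dict")
        plugin.state_dict_type    # StateDictType.SHARDED_STATE_DICT
        plugin.state_dict_config  # ShardedStateDictConfig(offload_to_cpu=True)
        ```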
""" from torch.distributed.fsdp.fully_sharded_data_parallel import ( FullOptimStateDictConfig, FullStateDictConfig, ShardedOptimStateDictConfig, ShardedStateDictConfig, StateDictType, ) if self.state_dict_type is None: self.state_dict_type = os.environ.get("FSDP_STATE_DICT_TYPE", "FULL_STATE_DICT") if isinstance(self.state_dict_type, str): if self.state_dict_type.isdigit(): self.state_dict_type = StateDictType(int(self.state_dict_type)) else: self.state_dict_type = StateDictType[self.state_dict_type.upper()] if self.state_dict_type == StateDictType.FULL_STATE_DICT: if self.state_dict_config is None: self.state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True) if self.optim_state_dict_config is None: self.optim_state_dict_config = FullOptimStateDictConfig(offload_to_cpu=True, rank0_only=True) elif self.state_dict_type == StateDictType.SHARDED_STATE_DICT: if self.state_dict_config is None: self.state_dict_config = ShardedStateDictConfig(offload_to_cpu=True) if self.optim_state_dict_config is None: self.optim_state_dict_config = ShardedOptimStateDictConfig(offload_to_cpu=True) def set_auto_wrap_policy(self, model): """ Given `model`, creates an `auto_wrap_policy` baesd on the passed in policy and if we can use the `transformer_cls_to_wrap` """ from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy # First base off of `_no_split_modules` no_split_modules = getattr(model, "_no_split_modules", None) default_transformer_cls_names_to_wrap = ( ",".join(model._no_split_modules) if no_split_modules is not None else "" ) if self.auto_wrap_policy == transformer_auto_wrap_policy: if self.transformer_cls_names_to_wrap is None: self.transformer_cls_names_to_wrap = default_transformer_cls_names_to_wrap transformer_cls_to_wrap = set() for layer_class in self.transformer_cls_names_to_wrap: transformer_cls = get_module_class_from_name(model, layer_class) if transformer_cls is None: raise ValueError(f"Could not find the transformer layer class {layer_class} in the model.") transformer_cls_to_wrap.add(transformer_cls) # Finally we set the auto_wrap_policy to a callable self.auto_wrap_policy = functools.partial( self.auto_wrap_policy, transformer_layer_cls=transformer_cls_to_wrap ) elif self.auto_wrap_policy == size_based_auto_wrap_policy: # If zero, we silently ignore it. if self.min_num_params > 0: self.auto_wrap_policy = functools.partial(self.auto_wrap_policy, min_num_params=self.min_num_params) else: self.auto_wrap_policy = None def set_mixed_precision(self, mixed_precision, buffer_autocast=False, override=False): "Sets the mixed precision policy for FSDP" mixed_precision_mapping = { "fp8": torch.bfloat16, "fp16": torch.float16, "bf16": torch.bfloat16, "fp32": torch.float32, } dtype = mixed_precision if isinstance(mixed_precision, str): dtype = mixed_precision_mapping.get(mixed_precision, None) if dtype is None: raise ValueError( f"Invalid mixed precision: {mixed_precision}. Must be one of {list(mixed_precision_mapping.keys())}" ) elif isinstance(mixed_precision, torch.dtype) and mixed_precision not in mixed_precision_mapping.values(): raise ValueError( f"Invalid mixed precision: {mixed_precision}. 
Must be one of {list(mixed_precision_mapping.values())}" ) buffer_type = torch.float32 if buffer_autocast else dtype from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision if override or self.mixed_precision_policy is None: self.mixed_precision_policy = MixedPrecision( param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=buffer_type ) elif isinstance(self.mixed_precision_policy, dict): # Check for incompatible types missing_keys = [ k for k in ["param_dtype", "reduce_dtype", "buffer_dtype"] if k not in self.mixed_precision_policy ] invalid_values = [ k for k, v in self.mixed_precision_policy.items() if v not in mixed_precision_mapping.values() ] if missing_keys or invalid_values: raise ValueError( f"Invalid mixed precision policy: {self.mixed_precision_policy}. " f"Must be a `dict` with keys `param_dtype`, `reduce_dtype`, and `buffer_dtype`. " f"Values must be one of {list(mixed_precision_mapping.values())}" ) self.mixed_precision_policy = MixedPrecision(**self.mixed_precision_policy) @dataclass class MegatronLMPlugin: """ Plugin for Megatron-LM to enable tensor, pipeline, sequence and data parallelism. Also to enable selective activation recomputation and optimized fused kernels. """ tp_degree: int = field(default=None, metadata={"help": "tensor parallelism degree."}) pp_degree: int = field(default=None, metadata={"help": "pipeline parallelism degree."}) num_micro_batches: int = field(default=None, metadata={"help": "number of micro-batches."}) gradient_clipping: float = field( default=None, metadata={"help": "gradient clipping value based on global L2 Norm (0 to disable)"} ) sequence_parallelism: bool = field( default=None, metadata={"help": "enable sequence parallelism"}, ) recompute_activations: bool = field( default=None, metadata={"help": "enable selective activation recomputation"}, ) use_distributed_optimizer: bool = field( default=None, metadata={"help": "enable distributed optimizer"}, ) pipeline_model_parallel_split_rank: int = field( default=None, metadata={"help": "Rank where encoder and decoder should be split."} ) num_layers_per_virtual_pipeline_stage: int = field( default=None, metadata={"help": "Number of layers per virtual pipeline stage."} ) is_train_batch_min: str = field( default=True, metadata={"help": "If both train & eval dataloaders are specified, this will decide the micro_batch_size"}, ) train_iters: int = field( default=None, metadata={ "help": "Total number of iterations to train over all training runs. " "Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`" }, ) train_samples: int = field( default=None, metadata={ "help": "Total number of samples to train over all training runs. " "Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`" }, ) weight_decay_incr_style: str = field( default="constant", metadata={"help": 'Weight decay increment function. choices=["constant", "linear", "cosine"]. '}, ) start_weight_decay: float = field( default=None, metadata={"help": "Initial weight decay coefficient for L2 regularization."}, ) end_weight_decay: float = field( default=None, metadata={"help": "End of run weight decay coefficient for L2 regularization."}, ) lr_decay_style: str = field( default="linear", metadata={"help": "Learning rate decay function. choices=['constant', 'linear', 'cosine']."}, ) lr_decay_iters: int = field( default=None, metadata={"help": "Number of iterations for learning rate decay. 
If None defaults to `train_iters`."}, ) lr_decay_samples: int = field( default=None, metadata={"help": "Number of samples for learning rate decay. If None defaults to `train_samples`."}, ) lr_warmup_iters: int = field( default=None, metadata={"help": "number of iterations to linearly warmup learning rate over."}, ) lr_warmup_samples: int = field( default=None, metadata={"help": "number of samples to linearly warmup learning rate over."}, ) lr_warmup_fraction: float = field( default=None, metadata={"help": "fraction of lr-warmup-(iters/samples) to linearly warmup learning rate over."}, ) min_lr: float = field( default=0, metadata={"help": "Minumum value for learning rate. The scheduler clip values below this threshold."}, ) consumed_samples: List[int] = field( default=None, metadata={ "help": "Number of samples consumed in the same order as the dataloaders to `accelerator.prepare` call." }, ) no_wd_decay_cond: Optional[Callable] = field(default=None, metadata={"help": "Condition to disable weight decay."}) scale_lr_cond: Optional[Callable] = field(default=None, metadata={"help": "Condition to scale learning rate."}) lr_mult: float = field(default=1.0, metadata={"help": "Learning rate multiplier."}) megatron_dataset_flag: bool = field( default=False, metadata={"help": "Whether the format of dataset follows Megatron-LM Indexed/Cached/MemoryMapped format."}, ) seq_length: int = field( default=None, metadata={"help": "Maximum sequence length to process."}, ) encoder_seq_length: int = field( default=None, metadata={"help": "Maximum sequence length to process for the encoder."}, ) decoder_seq_length: int = field( default=None, metadata={"help": "Maximum sequence length to process for the decoder."}, ) tensorboard_dir: str = field( default=None, metadata={"help": "Path to save tensorboard logs."}, ) set_all_logging_options: bool = field( default=False, metadata={"help": "Whether to set all logging options."}, ) eval_iters: int = field( default=100, metadata={"help": "Number of iterations to run for evaluation validation/test for."} ) eval_interval: int = field( default=1000, metadata={"help": "Interval between running evaluation on validation set."} ) return_logits: bool = field( default=False, metadata={"help": "Whether to return logits from the model."}, ) # custom train step args custom_train_step_class: Optional[Any] = field( default=None, metadata={"help": "Custom train step class."}, ) custom_train_step_kwargs: Optional[Dict[str, Any]] = field( default=None, metadata={"help": "Custom train step kwargs."}, ) # custom model args custom_model_provider_function: Optional[Callable] = field( default=None, metadata={"help": "Custom model provider function."}, ) custom_prepare_model_function: Optional[Callable] = field( default=None, metadata={"help": "Custom prepare model function."}, ) custom_megatron_datasets_provider_function: Optional[Callable] = field( default=None, metadata={"help": "Custom megatron train_valid_test datasets provider function."}, ) custom_get_batch_function: Optional[Callable] = field( default=None, metadata={"help": "Custom get batch function."}, ) custom_loss_function: Optional[Callable] = field( default=None, metadata={"help": "Custom loss function."}, ) # remaining args such as enabling Alibi/ROPE positional embeddings, # wandb logging, Multi-Query Attention, etc. other_megatron_args: Optional[Dict[str, Any]] = field( default=None, metadata={"help": "Other Megatron-LM arguments. 
Please refer Megatron-LM"}, ) def __post_init__(self): prefix = "MEGATRON_LM_" if self.tp_degree is None: self.tp_degree = int(os.environ.get(prefix + "TP_DEGREE", 1)) if self.pp_degree is None: self.pp_degree = int(os.environ.get(prefix + "PP_DEGREE", 1)) if self.num_micro_batches is None: self.num_micro_batches = int(os.environ.get(prefix + "NUM_MICRO_BATCHES", 1)) if self.gradient_clipping is None: self.gradient_clipping = float(os.environ.get(prefix + "GRADIENT_CLIPPING", 1.0)) if self.recompute_activations is None: self.recompute_activations = str_to_bool(os.environ.get(prefix + "RECOMPUTE_ACTIVATIONS", "False")) == 1 if self.use_distributed_optimizer is None: self.use_distributed_optimizer = ( str_to_bool(os.environ.get(prefix + "USE_DISTRIBUTED_OPTIMIZER", "False")) == 1 ) if self.sequence_parallelism is None: self.sequence_parallelism = str_to_bool(os.environ.get(prefix + "SEQUENCE_PARALLELISM", "False")) == 1 if self.pp_degree > 1 or self.use_distributed_optimizer: self.DDP_impl = "local" else: self.DDP_impl = "torch" if self.consumed_samples is not None: if len(self.consumed_samples) == 1: self.consumed_samples.extend([0, 0]) elif len(self.consumed_samples) == 2: self.consumed_samples.append(0) self.megatron_lm_default_args = { "tensor_model_parallel_size": self.tp_degree, "pipeline_model_parallel_size": self.pp_degree, "pipeline_model_parallel_split_rank": self.pipeline_model_parallel_split_rank, "num_layers_per_virtual_pipeline_stage": self.num_layers_per_virtual_pipeline_stage, "DDP_impl": self.DDP_impl, "use_distributed_optimizer": self.use_distributed_optimizer, "sequence_parallel": self.sequence_parallelism, "clip_grad": self.gradient_clipping, "num_micro_batches": self.num_micro_batches, "consumed_samples": self.consumed_samples, "no_wd_decay_cond": self.no_wd_decay_cond, "scale_lr_cond": self.scale_lr_cond, "lr_mult": self.lr_mult, "megatron_dataset_flag": self.megatron_dataset_flag, "eval_iters": self.eval_iters, "eval_interval": self.eval_interval, } if self.recompute_activations: self.megatron_lm_default_args["recompute_granularity"] = "selective" if self.tensorboard_dir is not None: self.megatron_lm_default_args["tensorboard_dir"] = self.tensorboard_dir if self.set_all_logging_options: self.set_tensorboard_logging_options() if self.other_megatron_args is not None: self.megatron_lm_default_args.update(self.other_megatron_args) def set_network_size_args(self, model, batch_data=None): model_config_type = model.config.model_type.lower() for model_type in MODEL_CONFIGS_TO_MEGATRON_PARSERS.keys(): if model_type in model_config_type: MODEL_CONFIGS_TO_MEGATRON_PARSERS[model_type](self, model, batch_data) return raise ValueError( f"Accelerate Megatron-LM integration not supports {model_config_type} model. " "You can add your own model config parser." 
) def set_mixed_precision(self, mixed_precision): if mixed_precision == "fp16": self.megatron_lm_default_args["fp16"] = True elif mixed_precision == "bf16": self.megatron_lm_default_args["bf16"] = True self.DDP_impl = "local" self.megatron_lm_default_args["DDP_impl"] = self.DDP_impl def set_training_args(self, micro_batch_size, dp_degree): self.data_parallel_size = dp_degree self.micro_batch_size = micro_batch_size self.global_batch_size = dp_degree * micro_batch_size * self.num_micro_batches self.megatron_lm_default_args["data_parallel_size"] = self.data_parallel_size self.megatron_lm_default_args["micro_batch_size"] = self.micro_batch_size self.megatron_lm_default_args["global_batch_size"] = self.global_batch_size def set_optimizer_type(self, optimizer): optimizer_name = optimizer.__class__.__name__.lower() if "adam" in optimizer_name: self.megatron_lm_default_args["optimizer"] = "adam" self.megatron_lm_default_args["adam_beta1"] = optimizer.defaults["betas"][0] self.megatron_lm_default_args["adam_beta2"] = optimizer.defaults["betas"][1] self.megatron_lm_default_args["adam_eps"] = optimizer.defaults["eps"] elif "sgd" in optimizer_name: self.megatron_lm_default_args["optimizer"] = "sgd" self.megatron_lm_default_args["sgd_momentum"] = optimizer.defaults["momentum"] else: raise ValueError(f"Optimizer {optimizer_name} is not supported by Megatron-LM") self.megatron_lm_default_args["lr"] = optimizer.defaults["lr"] self.megatron_lm_default_args["weight_decay"] = optimizer.defaults["weight_decay"] def set_scheduler_args(self, scheduler): if self.train_iters is None: self.train_iters = scheduler.total_num_steps // self.megatron_lm_default_args["data_parallel_size"] if self.train_samples is not None: self.train_samples = None warnings.warn( "Ignoring `train_samples` as `train_iters` based on scheduler is being used for training." ) if self.lr_warmup_iters is None: self.lr_warmup_iters = scheduler.warmup_num_steps // self.megatron_lm_default_args["data_parallel_size"] if self.lr_warmup_samples is not None: warnings.warn( "Ignoring `lr_warmup_samples` as `lr_warmup_iters` based on scheduler is being used for training." 
) self.lr_warmup_samples = 0 self.megatron_lm_default_args["train_iters"] = self.train_iters self.megatron_lm_default_args["lr_warmup_iters"] = self.lr_warmup_iters self.megatron_lm_default_args["train_samples"] = self.train_samples self.megatron_lm_default_args["lr_warmup_samples"] = self.lr_warmup_samples self.megatron_lm_default_args["lr_decay_iters"] = self.lr_decay_iters self.megatron_lm_default_args["lr_decay_samples"] = self.lr_decay_samples self.megatron_lm_default_args["lr_warmup_fraction"] = self.lr_warmup_fraction self.megatron_lm_default_args["lr_decay_style"] = self.lr_decay_style self.megatron_lm_default_args["weight_decay_incr_style"] = self.weight_decay_incr_style self.megatron_lm_default_args["start_weight_decay"] = self.start_weight_decay self.megatron_lm_default_args["end_weight_decay"] = self.end_weight_decay self.megatron_lm_default_args["min_lr"] = self.min_lr def set_tensorboard_logging_options(self): from megatron.arguments import _add_logging_args parser = argparse.ArgumentParser() parser = _add_logging_args(parser) logging_args = parser.parse_known_args() self.dataset_args = vars(logging_args[0]) for key, value in self.dataset_args.items(): if key.startswith("log_"): self.megatron_lm_default_args[key] = True elif key.startswith("no_log_"): self.megatron_lm_default_args[key.replace("no_", "")] = True MODEL_CONFIGS_TO_MEGATRON_PARSERS = {} def add_model_config_to_megatron_parser(model_type: str): def add_model_config_parser_helper(func): @functools.wraps(func) def wrapper(*args, **kwargs): return func(*args, **kwargs) MODEL_CONFIGS_TO_MEGATRON_PARSERS[model_type] = func return wrapper return add_model_config_parser_helper @add_model_config_to_megatron_parser("megatron-bert") def parse_bert_config(megatron_lm_plugin, model, batch_data): model_type_name = "bert" num_layers = model.config.num_hidden_layers hidden_size = model.config.hidden_size num_attention_heads = model.config.num_attention_heads max_position_embeddings = model.config.max_position_embeddings num_labels = model.config.num_labels orig_vocab_size = model.config.vocab_size pretraining_flag = False if "maskedlm" in model.__class__.__name__.lower(): pretraining_flag = True if megatron_lm_plugin.seq_length is not None: if megatron_lm_plugin.encoder_seq_length is not None: warnings.warn("Both `seq_length` and `encoder_seq_length` are set. 
Using `encoder_seq_length`.") megatron_lm_plugin.seq_length = megatron_lm_plugin.encoder_seq_length elif megatron_lm_plugin.encoder_seq_length is not None: megatron_lm_plugin.seq_length = megatron_lm_plugin.encoder_seq_length elif batch_data is not None: megatron_lm_plugin.seq_length = batch_data["input_ids"].shape[1] else: megatron_lm_plugin.seq_length = max_position_embeddings megatron_lm_plugin.megatron_lm_default_args["seq_length"] = megatron_lm_plugin.seq_length megatron_lm_plugin.megatron_lm_default_args["model_type_name"] = model_type_name megatron_lm_plugin.megatron_lm_default_args["num_layers"] = num_layers megatron_lm_plugin.megatron_lm_default_args["hidden_size"] = hidden_size megatron_lm_plugin.megatron_lm_default_args["num_attention_heads"] = num_attention_heads megatron_lm_plugin.megatron_lm_default_args["max_position_embeddings"] = max_position_embeddings megatron_lm_plugin.megatron_lm_default_args["pretraining_flag"] = pretraining_flag megatron_lm_plugin.megatron_lm_default_args["orig_vocab_size"] = orig_vocab_size megatron_lm_plugin.megatron_lm_default_args["model_return_dict"] = model.config.return_dict megatron_lm_plugin.megatron_lm_default_args["num_labels"] = num_labels @add_model_config_to_megatron_parser("gpt2") def parse_gpt2_config(megatron_lm_plugin, model, batch_data): model_type_name = "gpt" num_layers = model.config.n_layer hidden_size = model.config.n_embd num_attention_heads = model.config.n_head max_position_embeddings = model.config.n_positions orig_vocab_size = model.config.vocab_size pretraining_flag = True if megatron_lm_plugin.seq_length is not None: if megatron_lm_plugin.decoder_seq_length is not None: warnings.warn("Both `seq_length` and `decoder_seq_length` are set. Using `decoder_seq_length`.") megatron_lm_plugin.seq_length = megatron_lm_plugin.decoder_seq_length elif megatron_lm_plugin.decoder_seq_length is not None: megatron_lm_plugin.seq_length = megatron_lm_plugin.decoder_seq_length elif batch_data is not None: megatron_lm_plugin.seq_length = batch_data["input_ids"].shape[1] else: megatron_lm_plugin.seq_length = max_position_embeddings megatron_lm_plugin.megatron_lm_default_args["seq_length"] = megatron_lm_plugin.seq_length megatron_lm_plugin.megatron_lm_default_args["return_logits"] = megatron_lm_plugin.return_logits megatron_lm_plugin.megatron_lm_default_args["tokenizer_type"] = "GPT2BPETokenizer" megatron_lm_plugin.megatron_lm_default_args["model_type_name"] = model_type_name megatron_lm_plugin.megatron_lm_default_args["num_layers"] = num_layers megatron_lm_plugin.megatron_lm_default_args["hidden_size"] = hidden_size megatron_lm_plugin.megatron_lm_default_args["num_attention_heads"] = num_attention_heads megatron_lm_plugin.megatron_lm_default_args["max_position_embeddings"] = max_position_embeddings megatron_lm_plugin.megatron_lm_default_args["pretraining_flag"] = pretraining_flag megatron_lm_plugin.megatron_lm_default_args["orig_vocab_size"] = orig_vocab_size megatron_lm_plugin.megatron_lm_default_args["model_return_dict"] = model.config.return_dict @add_model_config_to_megatron_parser("t5") def parse_t5_config(megatron_lm_plugin, model, batch_data): model_type_name = "t5" num_layers = model.config.num_layers hidden_size = model.config.d_model num_attention_heads = model.config.num_heads max_position_embeddings = model.config.n_positions if hasattr(model.config, "n_positions") else 1024 orig_vocab_size = model.config.vocab_size pretraining_flag = True if megatron_lm_plugin.encoder_seq_length is None: if batch_data is not None: 
megatron_lm_plugin.encoder_seq_length = batch_data["input_ids"].shape[1] else: megatron_lm_plugin.encoder_seq_length = max_position_embeddings if megatron_lm_plugin.decoder_seq_length is None: if batch_data is not None: megatron_lm_plugin.decoder_seq_length = batch_data["labels"].shape[1] else: megatron_lm_plugin.decoder_seq_length = max_position_embeddings megatron_lm_plugin.megatron_lm_default_args["encoder_seq_length"] = megatron_lm_plugin.encoder_seq_length megatron_lm_plugin.megatron_lm_default_args["decoder_seq_length"] = megatron_lm_plugin.decoder_seq_length megatron_lm_plugin.megatron_lm_default_args["model_type_name"] = model_type_name megatron_lm_plugin.megatron_lm_default_args["num_layers"] = num_layers megatron_lm_plugin.megatron_lm_default_args["hidden_size"] = hidden_size megatron_lm_plugin.megatron_lm_default_args["num_attention_heads"] = num_attention_heads megatron_lm_plugin.megatron_lm_default_args["max_position_embeddings"] = max_position_embeddings megatron_lm_plugin.megatron_lm_default_args["pretraining_flag"] = pretraining_flag megatron_lm_plugin.megatron_lm_default_args["orig_vocab_size"] = orig_vocab_size megatron_lm_plugin.megatron_lm_default_args["model_return_dict"] = model.config.return_dict @add_model_config_to_megatron_parser("llama") def parse_llama_config(megatron_lm_plugin, model, batch_data): model_type_name = "gpt" num_layers = model.config.num_hidden_layers pretraining_flag = True hidden_size = model.config.hidden_size num_attention_heads = model.config.num_attention_heads orig_vocab_size = model.config.vocab_size max_position_embeddings = model.config.max_position_embeddings seq_length = getattr(model.config, "max_sequence_length", None) if megatron_lm_plugin.seq_length is None: if seq_length is not None: megatron_lm_plugin.seq_length = seq_length elif megatron_lm_plugin.decoder_seq_length is not None: megatron_lm_plugin.seq_length = megatron_lm_plugin.decoder_seq_length elif batch_data is not None: megatron_lm_plugin.seq_length = batch_data["input_ids"].shape[1] else: megatron_lm_plugin.seq_length = max_position_embeddings megatron_lm_plugin.megatron_lm_default_args["return_logits"] = megatron_lm_plugin.return_logits megatron_lm_plugin.megatron_lm_default_args["tokenizer_type"] = "Llama2Tokenizer" megatron_lm_plugin.megatron_lm_default_args["model_type_name"] = model_type_name megatron_lm_plugin.megatron_lm_default_args["num_layers"] = num_layers megatron_lm_plugin.megatron_lm_default_args["pretraining_flag"] = pretraining_flag megatron_lm_plugin.megatron_lm_default_args["hidden_size"] = hidden_size megatron_lm_plugin.megatron_lm_default_args["num_attention_heads"] = num_attention_heads megatron_lm_plugin.megatron_lm_default_args["orig_vocab_size"] = orig_vocab_size megatron_lm_plugin.megatron_lm_default_args["max_position_embeddings"] = max_position_embeddings megatron_lm_plugin.megatron_lm_default_args["seq_length"] = megatron_lm_plugin.seq_length megatron_lm_plugin.megatron_lm_default_args["model_return_dict"] = model.config.return_dict @dataclass class BnbQuantizationConfig: """ A plugin to enable BitsAndBytes 4bit and 8bit quantization """ load_in_8bit: bool = field(default=False, metadata={"help": "enable 8bit quantization."}) llm_int8_threshold: float = field( default=6.0, metadata={"help": "value of the outliner threshold. 
only relevant when load_in_8bit=True"} ) load_in_4bit: bool = field(default=False, metadata={"help": "enable 4bit quantization."}) bnb_4bit_quant_type: str = field( default="fp4", metadata={ "help": "set the quantization data type in the `bnb.nn.Linear4Bit` layers. Options are {'fp4','nf4'}." }, ) bnb_4bit_use_double_quant: bool = field( default=False, metadata={ "help": "enable nested quantization where the quantization constants from the first quantization are quantized again." }, ) bnb_4bit_compute_dtype: str = field( default="fp16", metadata={ "help": "This sets the computational type which might be different than the input type. For example, inputs might be " "fp32, but computation can be set to bf16 for speedups. Options are {'fp32','fp16','bf16'}." }, ) torch_dtype: torch.dtype = field( default=None, metadata={ "help": "this sets the dtype of the remaining non quantized layers. `bitsandbytes` library suggests to set the value " "to `torch.float16` for 8 bit model and use the same dtype as the compute dtype for 4 bit model " }, ) skip_modules: List[str] = field( default=None, metadata={ "help": "an explicit list of the modules that we don't quantize. The dtype of these modules will be `torch_dtype`." }, ) keep_in_fp32_modules: List[str] = field( default=None, metadata={"help": "an explicit list of the modules that we don't quantize. We keep them in `torch.float32`."}, ) def __post_init__(self): """ Safety checker that arguments are correct - also replaces some NoneType arguments with their default values. """ if not isinstance(self.load_in_8bit, bool): raise ValueError("load_in_8bit must be a boolean") if not isinstance(self.load_in_4bit, bool): raise ValueError("load_in_4bit must be a boolean") if self.load_in_4bit and self.load_in_8bit: raise ValueError("load_in_4bit and load_in_8bit can't be both True") if not self.load_in_4bit and not self.load_in_8bit: raise ValueError("load_in_4bit and load_in_8bit can't be both False") if not isinstance(self.llm_int8_threshold, (int, float)): raise ValueError("llm_int8_threshold must be a float or an int") if not isinstance(self.bnb_4bit_quant_type, str): raise ValueError("bnb_4bit_quant_type must be a string") elif self.bnb_4bit_quant_type not in ["fp4", "nf4"]: raise ValueError(f"bnb_4bit_quant_type must be in ['fp4','nf4'] but found {self.bnb_4bit_quant_type}") if not isinstance(self.bnb_4bit_use_double_quant, bool): raise ValueError("bnb_4bit_use_double_quant must be a boolean") if isinstance(self.bnb_4bit_compute_dtype, str): if self.bnb_4bit_compute_dtype == "fp32": self.bnb_4bit_compute_dtype = torch.float32 elif self.bnb_4bit_compute_dtype == "fp16": self.bnb_4bit_compute_dtype = torch.float16 elif self.bnb_4bit_compute_dtype == "bf16": self.bnb_4bit_compute_dtype = torch.bfloat16 else: raise ValueError( f"bnb_4bit_compute_dtype must be in ['fp32','fp16','bf16'] but found {self.bnb_4bit_compute_dtype}" ) elif not isinstance(self.bnb_4bit_compute_dtype, torch.dtype): raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype") if self.skip_modules is not None and not isinstance(self.skip_modules, list): raise ValueError("skip_modules must be a list of strings") if self.keep_in_fp32_modules is not None and not isinstance(self.keep_in_fp32_modules, list): raise ValueError("keep_in_fp32_modules must be a list of strings") if self.load_in_4bit: self.target_dtype = CustomDtype.INT4 if self.load_in_8bit: self.target_dtype = torch.int8 if self.load_in_4bit and self.llm_int8_threshold != 6.0: warnings.warn("llm_int8_threshold can
only be used for model loaded in 8bit") if isinstance(self.torch_dtype, str): if self.torch_dtype == "fp32": self.torch_dtype = torch.float32 elif self.torch_dtype == "fp16": self.torch_dtype = torch.float16 elif self.torch_dtype == "bf16": self.torch_dtype = torch.bfloat16 else: raise ValueError(f"torch_dtype must be in ['fp32','fp16','bf16'] but found {self.torch_dtype}") if self.load_in_8bit and self.torch_dtype is None: self.torch_dtype = torch.float16 if self.load_in_4bit and self.torch_dtype is None: self.torch_dtype = self.bnb_4bit_compute_dtype if not isinstance(self.torch_dtype, torch.dtype): raise ValueError("torch_dtype must be a torch.dtype") def get_module_class_from_name(module, name): """ Gets a class from a module by its name. Args: module (`torch.nn.Module`): The module to get the class from. name (`str`): The name of the class. """ modules_children = list(module.children()) if module.__class__.__name__ == name: return module.__class__ elif len(modules_children) == 0: return else: for child_module in modules_children: module_class = get_module_class_from_name(child_module, name) if module_class is not None: return module_class
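# Example usage of `BnbQuantizationConfig` above (illustrative values only; the
# field help strings and `__post_init__` define the accepted options):
#
#     config_8bit = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     config_4bit = BnbQuantizationConfig(
#         load_in_4bit=True,
#         bnb_4bit_quant_type="nf4",
#         bnb_4bit_use_double_quant=True,
#         bnb_4bit_compute_dtype="bf16",
#     )
#
# `__post_init__` validates these values and fills in `torch_dtype` when it is
# left unset (torch.float16 for 8-bit, the compute dtype for 4-bit).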
accelerate/src/accelerate/utils/dataclasses.py/0
{ "file_path": "accelerate/src/accelerate/utils/dataclasses.py", "repo_id": "accelerate", "token_count": 42303 }
8
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from types import MethodType import torch.nn as nn from .imports import is_fp8_available from .operations import GatheredParameters # Do not import `transformer_engine` at package level to avoid potential issues def convert_model(model, to_transformer_engine=True, _convert_linear=True, _convert_ln=True): """ Recursively converts the linear and layernorm layers of a model to their `transformers_engine` counterpart. """ if not is_fp8_available(): raise ImportError("Using `convert_model` requires transformer_engine to be installed.") import transformer_engine.pytorch as te for name, module in model.named_children(): if isinstance(module, nn.Linear) and to_transformer_engine and _convert_linear: has_bias = module.bias is not None params_to_gather = [module.weight] if has_bias: params_to_gather.append(module.bias) with GatheredParameters(params_to_gather, modifier_rank=0): if any(p % 16 != 0 for p in module.weight.shape): return te_module = te.Linear( module.in_features, module.out_features, bias=has_bias, params_dtype=module.weight.dtype ) te_module.weight.copy_(module.weight) if has_bias: te_module.bias.copy_(module.bias) setattr(model, name, te_module) # Note: @xrsrke (Phuc) found that te.LayerNorm doesn't have any real memory savings or speedups over nn.LayerNorm elif isinstance(module, nn.LayerNorm) and to_transformer_engine and _convert_ln: with GatheredParameters([module.weight, module.bias], modifier_rank=0): te_module = te.LayerNorm(module.normalized_shape[0], eps=module.eps, params_dtype=module.weight.dtype) te_module.weight.copy_(module.weight) te_module.bias.copy_(module.bias) setattr(model, name, te_module) elif isinstance(module, te.Linear) and not to_transformer_engine and _convert_linear: has_bias = module.bias is not None new_module = nn.Linear( module.in_features, module.out_features, bias=has_bias, params_dtype=module.weight.dtype ) new_module.weight.copy_(module.weight) if has_bias: new_module.bias.copy_(module.bias) setattr(model, name, new_module) elif isinstance(module, te.LayerNorm) and not to_transformer_engine and _convert_ln: new_module = nn.LayerNorm(module.normalized_shape[0], eps=module.eps, params_dtype=module.weight.dtype) new_module.weight.copy_(module.weight) new_module.bias.copy_(module.bias) setattr(model, name, new_module) else: convert_model( module, to_transformer_engine=to_transformer_engine, _convert_linear=_convert_linear, _convert_ln=_convert_ln, ) def has_transformer_engine_layers(model): """ Returns whether a given model has some `transformer_engine` layer or not. 
""" if not is_fp8_available(): raise ImportError("Using `has_transformer_engine_layers` requires transformer_engine to be installed.") import transformer_engine.pytorch as te for m in model.modules(): if isinstance(m, (te.LayerNorm, te.Linear, te.TransformerLayer)): return True return False def contextual_fp8_autocast(model_forward, fp8_recipe, use_during_eval=False): """ Wrapper for a model's forward method to apply FP8 autocast. Is context aware, meaning that by default it will disable FP8 autocast during eval mode, which is generally better for more accurate metrics. """ if not is_fp8_available(): raise ImportError("Using `contextual_fp8_autocast` requires transformer_engine to be installed.") from transformer_engine.pytorch import fp8_autocast def forward(self, *args, **kwargs): enabled = use_during_eval or self.training with fp8_autocast(enabled=enabled, fp8_recipe=fp8_recipe): return model_forward(*args, **kwargs) # To act like a decorator so that it can be popped when doing `extract_model_from_parallel` forward.__wrapped__ = model_forward return forward def apply_fp8_autowrap(model, fp8_recipe_handler): """ Applies FP8 context manager to the model's forward method """ if not is_fp8_available(): raise ImportError("Using `apply_fp8_autowrap` requires transformer_engine to be installed.") import transformer_engine.common.recipe as te_recipe kwargs = fp8_recipe_handler.to_kwargs() if fp8_recipe_handler is not None else {} if "fp8_format" in kwargs: kwargs["fp8_format"] = getattr(te_recipe.Format, kwargs["fp8_format"]) use_during_eval = kwargs.pop("use_autocast_during_eval", False) fp8_recipe = te_recipe.DelayedScaling(**kwargs) new_forward = contextual_fp8_autocast(model.forward, fp8_recipe, use_during_eval) if hasattr(model.forward, "__func__"): model.forward = MethodType(new_forward, model) else: model.forward = new_forward return model
accelerate/src/accelerate/utils/transformer_engine.py/0
{ "file_path": "accelerate/src/accelerate/utils/transformer_engine.py", "repo_id": "accelerate", "token_count": 2373 }
9
compute_environment: LOCAL_MACHINE deepspeed_config: {} distributed_type: 'NO' downcast_bf16: 'no' fsdp_config: {} gpu_ids: all machine_rank: 0 main_process_ip: null main_process_port: null main_training_function: main megatron_lm_config: {} mixed_precision: 'no' num_machines: 1 num_processes: 1 rdzv_backend: static same_network: true use_cpu: false tpu_name: 'test-tpu' tpu_zone: 'us-central1-a' commands: null command_file: tests/test_samples/test_command_file.sh
accelerate/tests/test_configs/latest.yaml/0
{ "file_path": "accelerate/tests/test_configs/latest.yaml", "repo_id": "accelerate", "token_count": 186 }
10
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from dataclasses import dataclass import pytest from accelerate.commands.config.config_args import SageMakerConfig from accelerate.utils import ComputeEnvironment from accelerate.utils.launch import _convert_nargs_to_dict @dataclass class MockLaunchConfig(SageMakerConfig): compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER fp16 = True ec2_instance_type = "ml.p3.2xlarge" iam_role_name = "accelerate_sagemaker_execution_role" profile = "hf-sm" region = "us-east-1" num_machines = 1 base_job_name = "accelerate-sagemaker-1" pytorch_version = "1.6" transformers_version = "4.4" training_script = "train.py" success_training_script_args = [ "--model_name_or_path", "bert", "--do_train", "False", "--epochs", "3", "--learning_rate", "5e-5", "--max_steps", "50.5", ] fail_training_script_args = [ "--model_name_or_path", "bert", "--do_train", "--do_test", "False", "--do_predict", "--epochs", "3", "--learning_rate", "5e-5", "--max_steps", "50.5", ] class SageMakerLaunch(unittest.TestCase): def test_args_convert(self): # If no defaults are changed, `to_kwargs` returns an empty dict. converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args) assert isinstance(converted_args["model_name_or_path"], str) assert isinstance(converted_args["do_train"], bool) assert isinstance(converted_args["epochs"], int) assert isinstance(converted_args["learning_rate"], float) assert isinstance(converted_args["max_steps"], float) with pytest.raises(ValueError): _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
accelerate/tests/test_sagemaker.py/0
{ "file_path": "accelerate/tests/test_sagemaker.py", "repo_id": "accelerate", "token_count": 1007 }
11
# Comparing Preference Alignment Algorithms

This directory contains various comparisons for three algorithms: DPO, IPO, and KTO. Each algorithm has been run in different hyperparameter configurations to study their performance. Two different models and datasets have been used to compare the performance of each algorithm:

- zephyr-beta-sft and Ultrafeedback
- OpenHermes-2.5 and the OpenOrca datasets

We release a collection containing the datasets and models used for these experiments; if you require the other trained models, we can release them on request. You can find a longer description of these results in our [blogpost](https://huggingface.co/blog/pref-tuning).

## Comparisons

For each algorithm, we aim to tune the beta parameter for a fixed learning rate. We vary beta from 0.1 to 0.9 in steps of 0.1. We have also found that in certain configurations a tiny value of beta, 0.01, can be effective, so we have included this smaller value in all our comparisons.

## Usage

The experiments can be launched with the following bash script:

```bash
#!/bin/bash
# Define an array containing the base configs we wish to fine-tune
configs=("zephyr" "openhermes")
# Define an array of loss types
loss_types=("sigmoid" "kto_pair" "ipo")
# Define an array of beta values
betas=("0.01" "0.1" "0.2" "0.3" "0.4" "0.5" "0.6" "0.7" "0.8" "0.9")

# Outer loop over base configs
for config in "${configs[@]}"; do
    # Middle loop over loss types
    for loss_type in "${loss_types[@]}"; do
        # Inner loop over beta values
        for beta in "${betas[@]}"; do
            # Determine the job name and model revision based on the loss type and beta
            job_name="${config}_${loss_type}_beta_${beta}"
            model_revision="${loss_type}-${beta}"

            # Submit the job
            sbatch --job-name=${job_name} recipes/launch.slurm pref_align_scan dpo $config deepspeed_zero3 \
              "--beta=${beta} --loss_type=${loss_type} --output_dir=data/$config-7b-align-scan-${loss_type}-beta-${beta} --hub_model_revision=${model_revision}"
        done
    done
done
```
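For reference, a single iteration of the loops above expands to the following command (here for the `zephyr` config, the `ipo` loss, and beta 0.4); all paths and launcher arguments are taken verbatim from the script:

```bash
sbatch --job-name=zephyr_ipo_beta_0.4 recipes/launch.slurm pref_align_scan dpo zephyr deepspeed_zero3 \
  "--beta=0.4 --loss_type=ipo --output_dir=data/zephyr-7b-align-scan-ipo-beta-0.4 --hub_model_revision=ipo-0.4"
```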
alignment-handbook/recipes/pref_align_scan/README.md/0
{ "file_path": "alignment-handbook/recipes/pref_align_scan/README.md", "repo_id": "alignment-handbook", "token_count": 691 }
12
# Instructions to Replicate Zephyr 7B Gemma

Similar to how we trained Zephyr 7B Beta in our [technical report](https://huggingface.co/papers/2310.16944), training this model proceeds in two steps:

1. Apply SFT to fine-tune Gemma 7B on the Deita 10k dataset ([link](https://huggingface.co/datasets/HuggingFaceH4/deita-10k-v0-sft)). The result is an SFT model like [`zephyr-7b-gemma-sft`](https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-sft-v0.1).
2. Align the SFT model to AI feedback via DPO on a curated mix of 7k examples by Argilla ([link](https://huggingface.co/datasets/argilla/dpo-mix-7k)). The result is a DPO model like [`zephyr-7b-gemma`](https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1).

See below for commands to train these models using either DeepSpeed ZeRO-3 or LoRA.

## Full training examples

You will require 8 GPUs (80GB of VRAM) to train the full model. Alternatively, you can train on 1 GPU by adjusting the micro batch size and gradient accumulation steps to keep the global batch size constant. A recipe involving QLoRA will come later 🤗.

```shell
# Step 1 - SFT
ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml scripts/run_sft.py recipes/zephyr-7b-gemma/sft/config_full.yaml

# Step 2 - DPO
ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml scripts/run_dpo.py recipes/zephyr-7b-gemma/dpo/config_full.yaml
```
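The global batch size is the product `num_gpus × per_device_train_batch_size × gradient_accumulation_steps`, so a single-GPU run needs 8× more gradient accumulation steps than the 8-GPU recipe to keep it constant. The sketch below is illustrative only: the batch-size numbers are assumptions rather than the recipe's actual settings, and it assumes the scripts accept command-line overrides of the YAML values.

```shell
# Hypothetical single-GPU SFT launch: 1 x 2 x 128 = 256 matches an assumed 8 x 2 x 16 = 256 global batch size.
ACCELERATE_LOG_LEVEL=info accelerate launch --num_processes=1 scripts/run_sft.py \
    recipes/zephyr-7b-gemma/sft/config_full.yaml \
    --per_device_train_batch_size=2 --gradient_accumulation_steps=128
```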
alignment-handbook/recipes/zephyr-7b-gemma/README.md/0
{ "file_path": "alignment-handbook/recipes/zephyr-7b-gemma/README.md", "repo_id": "alignment-handbook", "token_count": 505 }
13
# Porting a custom kernel
candle/candle-book/src/cuda/porting.md/0
{ "file_path": "candle/candle-book/src/cuda/porting.md", "repo_id": "candle", "token_count": 7 }
14
# Simplified

## How it works

This program implements a neural network to predict the winner of the second round of an election based on the results of the first round.

Key points:

1. A multilayer perceptron with two hidden layers is used. The first hidden layer has 4 neurons, the second has 2 neurons.
2. The input is a vector of 2 numbers: the percentages of votes for the first and second candidates in the first round.
3. The output is the number 0 or 1, where 1 means that the first candidate will win in the second round and 0 means that they will lose.
4. For training, samples with real data on the results of the first and second rounds of different elections are used.
5. The model is trained by backpropagation using gradient descent and the cross-entropy loss function.
6. Model parameters (the weights of the neurons) are initialized randomly and then optimized during training.
7. After training, the model is evaluated on a held-out sample to measure its accuracy.
8. If the accuracy on the test set is below 100%, the model is considered underfit and the training process is repeated.

Thus, this neural network learns to find hidden relationships between the results of the first and second rounds of voting in order to make predictions for new data. A rough sketch of what such a network looks like in code is given at the end of this page.

```rust,ignore
{{#include ../simplified.rs:book_training_simplified1}}
```

```rust,ignore
{{#include ../simplified.rs:book_training_simplified2}}
```

```rust,ignore
{{#include ../simplified.rs:book_training_simplified3}}
```

## Example output

```bash
Trying to train neural network.
Epoch: 1 Train loss: 4.42555 Test accuracy: 0.00%
Epoch: 2 Train loss: 0.84677 Test accuracy: 33.33%
Epoch: 3 Train loss: 2.54335 Test accuracy: 33.33%
Epoch: 4 Train loss: 0.37806 Test accuracy: 33.33%
Epoch: 5 Train loss: 0.36647 Test accuracy: 100.00%
real_life_votes: [13, 22]
neural_network_prediction_result: 0.0
```
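As a rough orientation, the network described in the points above corresponds to a model along the following lines. This is an illustrative sketch, not the code pulled in by the listings above: the struct and layer names are placeholders, and the full program additionally needs data loading plus a training loop that minimizes a cross-entropy loss over the two output logits.

```rust,ignore
use candle_core::{Result, Tensor};
use candle_nn::{linear, Linear, Module, VarBuilder};

// Illustrative sketch: 2 inputs -> 4 hidden -> 2 hidden -> 2 output logits.
struct ElectionNet {
    ln1: Linear,
    ln2: Linear,
    ln3: Linear,
}

impl ElectionNet {
    fn new(vs: VarBuilder) -> Result<Self> {
        let ln1 = linear(2, 4, vs.pp("ln1"))?; // first hidden layer: 4 neurons
        let ln2 = linear(4, 2, vs.pp("ln2"))?; // second hidden layer: 2 neurons
        let ln3 = linear(2, 2, vs.pp("ln3"))?; // 2 logits: candidate loses / wins
        Ok(Self { ln1, ln2, ln3 })
    }

    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let xs = self.ln1.forward(xs)?.relu()?;
        let xs = self.ln2.forward(&xs)?.relu()?;
        self.ln3.forward(&xs)
    }
}
```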
candle/candle-book/src/training/simplified.md/0
{ "file_path": "candle/candle-book/src/training/simplified.md", "repo_id": "candle", "token_count": 530 }
15
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use std::str::FromStr; use anyhow::Result; use candle_core::{Device, Tensor}; fn cos_sin(n: usize, device: &Device) -> Result<Tensor> { let thetas: Vec<_> = (0..n).map(|i| (i as f32 / n as f32)).collect(); let xs: Vec<_> = thetas.iter().map(|t| t.cos().abs()).collect(); let ys: Vec<_> = thetas.iter().map(|t| t.sin().abs()).collect(); let xs = Tensor::from_vec(xs, (n, 1), device)?; let ys = Tensor::from_vec(ys, (1, n), device)?; let ys = Tensor::cat(&[&ys, &ys, &ys, &ys, &ys, &ys], 1)?; Ok(xs.matmul(&ys)?) } fn main() -> Result<()> { let device = Device::new_cuda(0)?; let args = std::env::args().collect::<Vec<String>>(); let n = if args.len() < 2 { 2000usize } else { usize::from_str(&args[1])? }; let xys_cpu = cos_sin(n, &Device::Cpu)?; let xys = cos_sin(n, &device)?; println!("{xys_cpu:?} {xys:?}"); let sum_keepdim_cpu = xys_cpu.sum_keepdim(1)?; println!("{sum_keepdim_cpu}"); let sum_keepdim = xys.sum_keepdim(1)?; println!("{sum_keepdim}"); let start = std::time::Instant::now(); let n_iters = 100; let mut v = 0f32; for _i in 0..n_iters { let sum_keepdim = xys.sum_keepdim(1)?; let sum_keepdim = sum_keepdim.sum_keepdim(0)?; let sum_keepdim: f32 = sum_keepdim.reshape(&[])?.to_scalar()?; v += sum_keepdim; } let elapsed = start.elapsed(); if v > 0. { println!( "ran {n_iters} iterations, time per iter: {:?} ({v})", elapsed.div_f64(n_iters as f64) ); } Ok(()) }
candle/candle-core/examples/cuda_sum_benchmark.rs/0
{ "file_path": "candle/candle-core/examples/cuda_sum_benchmark.rs", "repo_id": "candle", "token_count": 827 }
16
use crate::backend::BackendDevice; use crate::{CpuStorage, CpuStorageRef, DType, Layout, Result, Shape}; pub use candle_kernels as kernels; pub use cudarc; use cudarc::driver::{CudaFunction, LaunchAsync, LaunchConfig}; use half::{bf16, f16}; use std::sync::{Arc, Mutex}; use super::{CudaError, CudaStorage, CudaStorageSlice, WrapErr}; /// Unique identifier for cuda devices. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct DeviceId(usize); impl DeviceId { fn new() -> Self { // https://users.rust-lang.org/t/idiomatic-rust-way-to-generate-unique-id/33805 use std::sync::atomic; static COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::new(1); Self(COUNTER.fetch_add(1, atomic::Ordering::Relaxed)) } } struct CudaRng(cudarc::curand::CudaRng); unsafe impl Send for CudaRng {} #[derive(Clone)] pub struct CudaDevice { id: DeviceId, device: Arc<cudarc::driver::CudaDevice>, pub(crate) blas: Arc<cudarc::cublas::CudaBlas>, curand: Arc<Mutex<CudaRng>>, } impl std::fmt::Debug for CudaDevice { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "CudaDevice({:?})", self.id) } } impl std::ops::Deref for CudaDevice { type Target = Arc<cudarc::driver::CudaDevice>; fn deref(&self) -> &Self::Target { &self.device } } impl CudaDevice { pub fn cuda_device(&self) -> Arc<cudarc::driver::CudaDevice> { self.device.clone() } pub fn id(&self) -> DeviceId { self.id } fn const_impl(&self, v: f64, shape: &Shape, dtype: DType) -> Result<CudaStorage> { let elem_count = shape.elem_count(); let cfg = LaunchConfig::for_num_elems(elem_count as u32); let slice = match dtype { DType::U8 => { // SAFETY: Set later by running the fill kernel. let data = unsafe { self.alloc::<u8>(elem_count) }.w()?; let func = self.get_or_load_func("fill_u8", kernels::FILL)?; let params = (&data, v as u8, elem_count); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::U8(data) } DType::U32 => { // SAFETY: Set later by running the fill kernel. let data = unsafe { self.alloc::<u32>(elem_count) }.w()?; let func = self.get_or_load_func("fill_u32", kernels::FILL)?; let params = (&data, v as u32, elem_count); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::U32(data) } DType::I64 => { // SAFETY: Set later by running the fill kernel. let data = unsafe { self.alloc::<i64>(elem_count) }.w()?; let func = self.get_or_load_func("fill_i64", kernels::FILL)?; let params = (&data, v as i64, elem_count); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::I64(data) } DType::BF16 => { // SAFETY: Set later by running the fill kernel. let data = unsafe { self.alloc::<bf16>(elem_count) }.w()?; let func = self.get_or_load_func("fill_bf16", kernels::FILL)?; let params = (&data, bf16::from_f64(v), elem_count); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::BF16(data) } DType::F16 => { // SAFETY: Set later by running the fill kernel. let data = unsafe { self.alloc::<f16>(elem_count) }.w()?; let func = self.get_or_load_func("fill_f16", kernels::FILL)?; let params = (&data, f16::from_f64(v), elem_count); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::F16(data) } DType::F32 => { // SAFETY: Set later by running the fill kernel. let data = unsafe { self.alloc::<f32>(elem_count) }.w()?; let func = self.get_or_load_func("fill_f32", kernels::FILL)?; let params = (&data, v as f32, elem_count); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::F32(data) } DType::F64 => { // SAFETY: Set later by running the fill kernel. 
let data = unsafe { self.alloc::<f64>(elem_count) }.w()?; let func = self.get_or_load_func("fill_f64", kernels::FILL)?; let params = (&data, v, elem_count); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::F64(data) } }; Ok(CudaStorage { slice, device: self.clone(), }) } pub fn get_or_load_func(&self, module_name: &str, ptx: &'static str) -> Result<CudaFunction> { if !self.has_func(module_name, module_name) { // Leaking the string here is a bit sad but we need a &'static str and this is only // done once per kernel name. let static_module_name = Box::leak(module_name.to_string().into_boxed_str()); self.load_ptx(ptx.into(), module_name, &[static_module_name]) .map_err(|cuda| CudaError::Load { cuda, module_name: module_name.to_string(), }) .w()?; } self.get_func(module_name, module_name) // Clippy recommends this `ok_or` rather than `ok_or_else` so hopefully the compiler is // able to only build the error value if needed. .ok_or(CudaError::MissingKernel { module_name: module_name.to_string(), }) .w() } } impl BackendDevice for CudaDevice { type Storage = CudaStorage; fn new(ordinal: usize) -> Result<Self> { let device = cudarc::driver::CudaDevice::new(ordinal).w()?; let blas = cudarc::cublas::CudaBlas::new(device.clone()).w()?; let curand = cudarc::curand::CudaRng::new(299792458, device.clone()).w()?; Ok(Self { id: DeviceId::new(), device, blas: Arc::new(blas), curand: Arc::new(Mutex::new(CudaRng(curand))), }) } fn set_seed(&self, seed: u64) -> Result<()> { // We do not call set_seed but instead create a new curand object. This ensures that the // state will be identical and the same random numbers will be generated. let mut curand = self.curand.lock().unwrap(); curand.0 = cudarc::curand::CudaRng::new(seed, self.device.clone()).w()?; Ok(()) } fn location(&self) -> crate::DeviceLocation { crate::DeviceLocation::Cuda { gpu_id: self.device.ordinal(), } } fn same_device(&self, rhs: &Self) -> bool { self.id == rhs.id } fn zeros_impl(&self, shape: &Shape, dtype: DType) -> Result<CudaStorage> { let elem_count = shape.elem_count(); let slice = match dtype { DType::U8 => { let data = self.alloc_zeros::<u8>(elem_count).w()?; CudaStorageSlice::U8(data) } DType::U32 => { let data = self.alloc_zeros::<u32>(elem_count).w()?; CudaStorageSlice::U32(data) } DType::I64 => { let data = self.alloc_zeros::<i64>(elem_count).w()?; CudaStorageSlice::I64(data) } DType::BF16 => { let data = self.alloc_zeros::<bf16>(elem_count).w()?; CudaStorageSlice::BF16(data) } DType::F16 => { let data = self.alloc_zeros::<f16>(elem_count).w()?; CudaStorageSlice::F16(data) } DType::F32 => { let data = self.alloc_zeros::<f32>(elem_count).w()?; CudaStorageSlice::F32(data) } DType::F64 => { let data = self.alloc_zeros::<f64>(elem_count).w()?; CudaStorageSlice::F64(data) } }; Ok(CudaStorage { slice, device: self.clone(), }) } fn rand_uniform(&self, shape: &Shape, dtype: DType, lo: f64, up: f64) -> Result<CudaStorage> { let elem_count = shape.elem_count(); let curand = self.curand.lock().unwrap(); let slice = match dtype { // TODO: Add support for F16 and BF16 though this is likely to require some upstream // cudarc changes. DType::U8 | DType::U32 | DType::I64 | DType::F16 | DType::BF16 => { Err(CudaError::UnsupportedDtype { dtype, op: "rand_uniform", }) .w()? 
} DType::F32 => { let mut data = unsafe { self.alloc::<f32>(elem_count) }.w()?; curand.0.fill_with_uniform(&mut data).w()?; CudaStorageSlice::F32(data) } DType::F64 => { let mut data = unsafe { self.alloc::<f64>(elem_count) }.w()?; curand.0.fill_with_uniform(&mut data).w()?; CudaStorageSlice::F64(data) } }; let slice = if lo == 0. && up == 1.0 { slice } else { use super::utils::Map1; let layout = Layout::contiguous(shape); super::Affine(up - lo, lo).map(&slice, self, &layout)? }; Ok(CudaStorage { slice, device: self.clone(), }) } fn rand_normal(&self, shape: &Shape, dtype: DType, mean: f64, std: f64) -> Result<CudaStorage> { // TODO: Add support for F16 and BF16 though this is likely to require some upstream // cudarc changes. let elem_count = shape.elem_count(); let curand = self.curand.lock().unwrap(); // curand can only generate an odd number of values. // https://github.com/huggingface/candle/issues/734 let elem_count_round = if elem_count % 2 == 1 { elem_count + 1 } else { elem_count }; let slice = match dtype { DType::U8 | DType::U32 | DType::I64 | DType::F16 | DType::BF16 => { Err(CudaError::UnsupportedDtype { dtype, op: "rand_normal", }) .w()? } DType::F32 => { let mut data = unsafe { self.alloc::<f32>(elem_count_round) }.w()?; curand .0 .fill_with_normal(&mut data, mean as f32, std as f32) .w()?; CudaStorageSlice::F32(data) } DType::F64 => { let mut data = unsafe { self.alloc::<f64>(elem_count_round) }.w()?; curand.0.fill_with_normal(&mut data, mean, std).w()?; CudaStorageSlice::F64(data) } }; Ok(CudaStorage { slice, device: self.clone(), }) } fn ones_impl(&self, shape: &Shape, dtype: DType) -> Result<CudaStorage> { self.const_impl(1., shape, dtype) } unsafe fn alloc_uninit(&self, shape: &Shape, dtype: DType) -> Result<Self::Storage> { let elem_count = shape.elem_count(); let slice = match dtype { DType::U8 => { let data = self.alloc::<u8>(elem_count).w()?; CudaStorageSlice::U8(data) } DType::U32 => { let data = self.alloc::<u32>(elem_count).w()?; CudaStorageSlice::U32(data) } DType::I64 => { let data = self.alloc::<i64>(elem_count).w()?; CudaStorageSlice::I64(data) } DType::BF16 => { let data = self.alloc::<bf16>(elem_count).w()?; CudaStorageSlice::BF16(data) } DType::F16 => { let data = self.alloc::<f16>(elem_count).w()?; CudaStorageSlice::F16(data) } DType::F32 => { let data = self.alloc::<f32>(elem_count).w()?; CudaStorageSlice::F32(data) } DType::F64 => { let data = self.alloc::<f64>(elem_count).w()?; CudaStorageSlice::F64(data) } }; Ok(CudaStorage { slice, device: self.clone(), }) } fn storage_from_slice<T: crate::WithDType>(&self, s: &[T]) -> Result<Self::Storage> { let slice = match T::cpu_storage_ref(s) { CpuStorageRef::U8(storage) => { let data = self.htod_sync_copy(storage).w()?; CudaStorageSlice::U8(data) } CpuStorageRef::U32(storage) => { let data = self.htod_sync_copy(storage).w()?; CudaStorageSlice::U32(data) } CpuStorageRef::I64(storage) => { let data = self.htod_sync_copy(storage).w()?; CudaStorageSlice::I64(data) } CpuStorageRef::BF16(storage) => { let data = self.htod_sync_copy(storage).w()?; CudaStorageSlice::BF16(data) } CpuStorageRef::F16(storage) => { let data = self.htod_sync_copy(storage).w()?; CudaStorageSlice::F16(data) } CpuStorageRef::F32(storage) => { let data = self.htod_sync_copy(storage).w()?; CudaStorageSlice::F32(data) } CpuStorageRef::F64(storage) => { let data = self.htod_sync_copy(storage).w()?; CudaStorageSlice::F64(data) } }; Ok(CudaStorage { slice, device: self.clone(), }) } fn storage_from_cpu_storage(&self, storage: &CpuStorage) -> 
Result<CudaStorage> { let slice = match storage { CpuStorage::U8(storage) => { let data = self.htod_sync_copy(storage).w()?; CudaStorageSlice::U8(data) } CpuStorage::U32(storage) => { let data = self.htod_sync_copy(storage).w()?; CudaStorageSlice::U32(data) } CpuStorage::I64(storage) => { let data = self.htod_sync_copy(storage).w()?; CudaStorageSlice::I64(data) } CpuStorage::BF16(storage) => { let data = self.htod_sync_copy(storage).w()?; CudaStorageSlice::BF16(data) } CpuStorage::F16(storage) => { let data = self.htod_sync_copy(storage).w()?; CudaStorageSlice::F16(data) } CpuStorage::F32(storage) => { let data = self.htod_sync_copy(storage).w()?; CudaStorageSlice::F32(data) } CpuStorage::F64(storage) => { let data = self.htod_sync_copy(storage).w()?; CudaStorageSlice::F64(data) } }; Ok(CudaStorage { slice, device: self.clone(), }) } fn storage_from_cpu_storage_owned(&self, storage: CpuStorage) -> Result<CudaStorage> { let slice = match storage { CpuStorage::U8(storage) => { let data = self.htod_copy(storage).w()?; CudaStorageSlice::U8(data) } CpuStorage::U32(storage) => { let data = self.htod_copy(storage).w()?; CudaStorageSlice::U32(data) } CpuStorage::I64(storage) => { let data = self.htod_copy(storage).w()?; CudaStorageSlice::I64(data) } CpuStorage::BF16(storage) => { let data = self.htod_copy(storage).w()?; CudaStorageSlice::BF16(data) } CpuStorage::F16(storage) => { let data = self.htod_copy(storage).w()?; CudaStorageSlice::F16(data) } CpuStorage::F32(storage) => { let data = self.htod_copy(storage).w()?; CudaStorageSlice::F32(data) } CpuStorage::F64(storage) => { let data = self.htod_copy(storage).w()?; CudaStorageSlice::F64(data) } }; Ok(CudaStorage { slice, device: self.clone(), }) } fn synchronize(&self) -> Result<()> { self.device.synchronize().map_err(crate::Error::wrap)?; Ok(()) } }
candle/candle-core/src/cuda_backend/device.rs/0
{ "file_path": "candle/candle-core/src/cuda_backend/device.rs", "repo_id": "candle", "token_count": 9237 }
17
#![allow(dead_code)] use libc::{c_char, c_double, c_float, c_int}; mod ffi { use super::*; extern "C" { pub fn vsTanh(n: c_int, a: *const c_float, y: *mut c_float); pub fn vdTanh(n: c_int, a: *const c_double, y: *mut c_double); pub fn vsExp(n: c_int, a: *const c_float, y: *mut c_float); pub fn vdExp(n: c_int, a: *const c_double, y: *mut c_double); pub fn vsLn(n: c_int, a: *const c_float, y: *mut c_float); pub fn vdLn(n: c_int, a: *const c_double, y: *mut c_double); pub fn vsSin(n: c_int, a: *const c_float, y: *mut c_float); pub fn vdSin(n: c_int, a: *const c_double, y: *mut c_double); pub fn vsCos(n: c_int, a: *const c_float, y: *mut c_float); pub fn vdCos(n: c_int, a: *const c_double, y: *mut c_double); pub fn vsSqrt(n: c_int, a: *const c_float, y: *mut c_float); pub fn vdSqrt(n: c_int, a: *const c_double, y: *mut c_double); pub fn vsAdd(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float); pub fn vdAdd(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double); pub fn vsSub(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float); pub fn vdSub(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double); pub fn vsMul(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float); pub fn vdMul(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double); pub fn vsDiv(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float); pub fn vdDiv(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double); pub fn vsFmax(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float); pub fn vdFmax(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double); pub fn vsFmin(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float); pub fn vdFmin(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double); pub fn sgemm_( transa: *const c_char, transb: *const c_char, m: *const c_int, n: *const c_int, k: *const c_int, alpha: *const c_float, a: *const c_float, lda: *const c_int, b: *const c_float, ldb: *const c_int, beta: *const c_float, c: *mut c_float, ldc: *const c_int, ); pub fn dgemm_( transa: *const c_char, transb: *const c_char, m: *const c_int, n: *const c_int, k: *const c_int, alpha: *const c_double, a: *const c_double, lda: *const c_int, b: *const c_double, ldb: *const c_int, beta: *const c_double, c: *mut c_double, ldc: *const c_int, ); pub fn hgemm_( transa: *const c_char, transb: *const c_char, m: *const c_int, n: *const c_int, k: *const c_int, alpha: *const half::f16, a: *const half::f16, lda: *const c_int, b: *const half::f16, ldb: *const c_int, beta: *const half::f16, c: *mut half::f16, ldc: *const c_int, ); } } #[allow(clippy::too_many_arguments)] #[inline] pub unsafe fn sgemm( transa: u8, transb: u8, m: i32, n: i32, k: i32, alpha: f32, a: &[f32], lda: i32, b: &[f32], ldb: i32, beta: f32, c: &mut [f32], ldc: i32, ) { ffi::sgemm_( &(transa as c_char), &(transb as c_char), &m, &n, &k, &alpha, a.as_ptr(), &lda, b.as_ptr(), &ldb, &beta, c.as_mut_ptr(), &ldc, ) } #[allow(clippy::too_many_arguments)] #[inline] pub unsafe fn dgemm( transa: u8, transb: u8, m: i32, n: i32, k: i32, alpha: f64, a: &[f64], lda: i32, b: &[f64], ldb: i32, beta: f64, c: &mut [f64], ldc: i32, ) { ffi::dgemm_( &(transa as c_char), &(transb as c_char), &m, &n, &k, &alpha, a.as_ptr(), &lda, b.as_ptr(), &ldb, &beta, c.as_mut_ptr(), &ldc, ) } #[allow(clippy::too_many_arguments)] #[inline] pub unsafe fn hgemm( transa: u8, transb: u8, m: i32, n: i32, k: i32, alpha: half::f16, a: &[half::f16], lda: i32, b: &[half::f16], ldb: i32, beta: 
half::f16, c: &mut [half::f16], ldc: i32, ) { ffi::hgemm_( &(transa as c_char), &(transb as c_char), &m, &n, &k, &alpha, a.as_ptr(), &lda, b.as_ptr(), &ldb, &beta, c.as_mut_ptr(), &ldc, ) } #[inline] pub fn vs_exp(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vsExp(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_exp(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vdExp(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_ln(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vsLn(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_ln(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vdLn(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_sin(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vsSin(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_sin(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vdSin(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_cos(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vsCos(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_cos(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vdCos(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_sqrt(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vsSqrt(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_sqrt(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vdSqrt(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_sqr(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vsMul(a_len as i32, a.as_ptr(), a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_sqr(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vdMul(a_len as i32, a.as_ptr(), a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_tanh(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vsTanh(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_tanh(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { 
ffi::vdTanh(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } // The vector functions from mkl can be performed in place by using the same array for input and // output. // https://www.intel.com/content/www/us/en/docs/onemkl/developer-reference-c/2023-2/vector-mathematical-functions.html #[inline] pub fn vs_tanh_inplace(y: &mut [f32]) { unsafe { ffi::vsTanh(y.len() as i32, y.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_tanh_inplace(y: &mut [f64]) { unsafe { ffi::vdTanh(y.len() as i32, y.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_exp_inplace(y: &mut [f32]) { unsafe { ffi::vsExp(y.len() as i32, y.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_exp_inplace(y: &mut [f64]) { unsafe { ffi::vdExp(y.len() as i32, y.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_gelu(vs: &[f32], ys: &mut [f32]) { for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = (2.0f32 / std::f32::consts::PI).sqrt() * v * (1.0 + 0.044715 * v * v) } vs_tanh_inplace(ys); for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = 0.5 * v * (1.0 + *y) } } #[inline] pub fn vd_gelu(vs: &[f64], ys: &mut [f64]) { for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = (2.0f64 / std::f64::consts::PI).sqrt() * v * (1.0 + 0.044715 * v * v) } vd_tanh_inplace(ys); for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = 0.5 * v * (1.0 + *y) } } #[inline] pub fn vs_silu(vs: &[f32], ys: &mut [f32]) { for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = -v } vs_exp_inplace(ys); for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = v / (1.0 + *y) } } #[inline] pub fn vd_silu(vs: &[f64], ys: &mut [f64]) { for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = -v } vd_exp_inplace(ys); for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = v / (1.0 + *y) } } macro_rules! binary_op { ($fn_name:ident, $ty:ty, $mkl_name:ident) => { #[inline] pub fn $fn_name(a: &[$ty], b: &[$ty], y: &mut [$ty]) { let a_len = a.len(); let b_len = b.len(); let y_len = y.len(); if a_len != y_len || b_len != y_len { panic!( "{} a,b,y len mismatch {a_len} {b_len} {y_len}", stringify!($fn_name) ); } unsafe { ffi::$mkl_name(a_len as i32, a.as_ptr(), b.as_ptr(), y.as_mut_ptr()) } } }; } binary_op!(vs_add, f32, vsAdd); binary_op!(vd_add, f64, vdAdd); binary_op!(vs_sub, f32, vsSub); binary_op!(vd_sub, f64, vdSub); binary_op!(vs_mul, f32, vsMul); binary_op!(vd_mul, f64, vdMul); binary_op!(vs_div, f32, vsDiv); binary_op!(vd_div, f64, vdDiv); binary_op!(vs_max, f32, vsFmax); binary_op!(vd_max, f64, vdFmax); binary_op!(vs_min, f32, vsFmin); binary_op!(vd_min, f64, vdFmin);
candle/candle-core/src/mkl.rs/0
{ "file_path": "candle/candle-core/src/mkl.rs", "repo_id": "candle", "token_count": 6463 }
18
use crate::{DType, Device, Error, Result, Tensor, WithDType}; use safetensors::tensor as st; use safetensors::tensor::SafeTensors; use std::borrow::Cow; use std::collections::HashMap; use std::path::Path; impl From<DType> for st::Dtype { fn from(value: DType) -> Self { match value { DType::U8 => st::Dtype::U8, DType::U32 => st::Dtype::U32, DType::I64 => st::Dtype::I64, DType::BF16 => st::Dtype::BF16, DType::F16 => st::Dtype::F16, DType::F32 => st::Dtype::F32, DType::F64 => st::Dtype::F64, } } } impl TryFrom<st::Dtype> for DType { type Error = Error; fn try_from(value: st::Dtype) -> Result<Self> { match value { st::Dtype::U8 => Ok(DType::U8), st::Dtype::U32 => Ok(DType::U32), st::Dtype::I64 => Ok(DType::I64), st::Dtype::BF16 => Ok(DType::BF16), st::Dtype::F16 => Ok(DType::F16), st::Dtype::F32 => Ok(DType::F32), st::Dtype::F64 => Ok(DType::F64), dtype => Err(Error::UnsupportedSafeTensorDtype(dtype)), } } } impl st::View for Tensor { fn dtype(&self) -> st::Dtype { self.dtype().into() } fn shape(&self) -> &[usize] { self.shape().dims() } fn data(&self) -> Cow<[u8]> { // This copies data from GPU to CPU. // TODO: Avoid the unwrap here. Cow::Owned(convert_back(self).unwrap()) } fn data_len(&self) -> usize { let n: usize = self.shape().elem_count(); let bytes_per_element = self.dtype().size_in_bytes(); n * bytes_per_element } } impl st::View for &Tensor { fn dtype(&self) -> st::Dtype { (*self).dtype().into() } fn shape(&self) -> &[usize] { self.dims() } fn data(&self) -> Cow<[u8]> { // This copies data from GPU to CPU. // TODO: Avoid the unwrap here. Cow::Owned(convert_back(self).unwrap()) } fn data_len(&self) -> usize { let n: usize = self.dims().iter().product(); let bytes_per_element = (*self).dtype().size_in_bytes(); n * bytes_per_element } } impl Tensor { pub fn save_safetensors<P: AsRef<Path>>(&self, name: &str, filename: P) -> Result<()> { let data = [(name, self.clone())]; Ok(st::serialize_to_file(data, &None, filename.as_ref())?) } } fn convert_slice<T: WithDType>(data: &[u8], shape: &[usize], device: &Device) -> Result<Tensor> { let size_in_bytes = T::DTYPE.size_in_bytes(); let elem_count = data.len() / size_in_bytes; if (data.as_ptr() as usize) % size_in_bytes == 0 { // SAFETY This is safe because we just checked that this // was correctly aligned. let data: &[T] = unsafe { std::slice::from_raw_parts(data.as_ptr() as *const T, elem_count) }; Tensor::from_slice(data, shape, device) } else { // XXX: We need to specify `T` here, otherwise the compiler will infer u8 because of the following cast // Making this vector too small to fit a full f16/f32/f64 weights, resulting in out-of-bounds access let mut c: Vec<T> = Vec::with_capacity(elem_count); // SAFETY: We just created c, so the allocated memory is necessarily // contiguous and non overlapping with the view's data. // We're downgrading the `c` pointer from T to u8, which removes alignment // constraints. unsafe { std::ptr::copy_nonoverlapping(data.as_ptr(), c.as_mut_ptr() as *mut u8, data.len()); c.set_len(elem_count) } Tensor::from_slice(&c, shape, device) } } fn convert_slice_with_cast<T: Sized + Copy, U: WithDType, F: Fn(T) -> Result<U>>( data: &[u8], shape: &[usize], device: &Device, conv: F, ) -> Result<Tensor> { let size_in_bytes = std::mem::size_of::<T>(); let elem_count = data.len() / size_in_bytes; if (data.as_ptr() as usize) % size_in_bytes == 0 { // SAFETY This is safe because we just checked that this // was correctly aligned. 
let data: &[T] = unsafe { std::slice::from_raw_parts(data.as_ptr() as *const T, elem_count) }; let data = data.iter().map(|t| conv(*t)).collect::<Result<Vec<_>>>()?; Tensor::from_vec(data, shape, device) } else { // XXX: We need to specify `T` here, otherwise the compiler will infer u8 because of the following cast // Making this vector too small to fit a full f16/f32/f64 weights, resulting in out-of-bounds access let mut c: Vec<T> = Vec::with_capacity(elem_count); // SAFETY: We just created c, so the allocated memory is necessarily // contiguous and non overlapping with the view's data. // We're downgrading the `c` pointer from T to u8, which removes alignment // constraints. unsafe { std::ptr::copy_nonoverlapping(data.as_ptr(), c.as_mut_ptr() as *mut u8, data.len()); c.set_len(elem_count) } let c = c.into_iter().map(conv).collect::<Result<Vec<_>>>()?; Tensor::from_vec(c, shape, device) } } fn convert_with_cast_<T: Sized + Copy, U: WithDType, F: Fn(T) -> Result<U>>( view: &st::TensorView<'_>, device: &Device, conv: F, ) -> Result<Tensor> { convert_slice_with_cast::<T, U, F>(view.data(), view.shape(), device, conv) } fn convert_<T: WithDType>(view: &st::TensorView<'_>, device: &Device) -> Result<Tensor> { convert_slice::<T>(view.data(), view.shape(), device) } fn convert_back_<T: WithDType>(mut vs: Vec<T>) -> Vec<u8> { let size_in_bytes = T::DTYPE.size_in_bytes(); let length = vs.len() * size_in_bytes; let capacity = vs.capacity() * size_in_bytes; let ptr = vs.as_mut_ptr() as *mut u8; // Don't run the destructor for Vec<T> std::mem::forget(vs); // SAFETY: // // Every T is larger than u8, so there is no issue regarding alignment. // This re-interpret the Vec<T> as a Vec<u8>. unsafe { Vec::from_raw_parts(ptr, length, capacity) } } pub trait Load { fn load(&self, device: &Device) -> Result<Tensor>; } impl<'a> Load for st::TensorView<'a> { fn load(&self, device: &Device) -> Result<Tensor> { convert(self, device) } } impl Tensor { pub fn from_raw_buffer( data: &[u8], dtype: DType, shape: &[usize], device: &Device, ) -> Result<Self> { match dtype { DType::U8 => convert_slice::<u8>(data, shape, device), DType::U32 => convert_slice::<u32>(data, shape, device), DType::I64 => convert_slice::<i64>(data, shape, device), DType::BF16 => convert_slice::<half::bf16>(data, shape, device), DType::F16 => convert_slice::<half::f16>(data, shape, device), DType::F32 => convert_slice::<f32>(data, shape, device), DType::F64 => convert_slice::<f64>(data, shape, device), } } } fn convert(view: &st::TensorView<'_>, device: &Device) -> Result<Tensor> { match view.dtype() { st::Dtype::U8 => convert_::<u8>(view, device), st::Dtype::U16 => { let conv = |x| Ok(u32::from(x)); convert_with_cast_::<u16, u32, _>(view, device, conv) } st::Dtype::U32 => convert_::<u32>(view, device), st::Dtype::I32 => { let conv = |x| Ok(i64::from(x)); convert_with_cast_::<i32, i64, _>(view, device, conv) } st::Dtype::I64 => convert_::<i64>(view, device), st::Dtype::BF16 => convert_::<half::bf16>(view, device), st::Dtype::F16 => convert_::<half::f16>(view, device), st::Dtype::F32 => convert_::<f32>(view, device), st::Dtype::F64 => convert_::<f64>(view, device), dtype => Err(Error::UnsupportedSafeTensorDtype(dtype)), } } fn convert_back(tensor: &Tensor) -> Result<Vec<u8>> { // TODO: This makes an unnecessary copy when the tensor is on the cpu. 
let tensor = tensor.flatten_all()?; match tensor.dtype() { DType::U8 => Ok(convert_back_::<u8>(tensor.to_vec1()?)), DType::U32 => Ok(convert_back_::<u32>(tensor.to_vec1()?)), DType::I64 => Ok(convert_back_::<i64>(tensor.to_vec1()?)), DType::F16 => Ok(convert_back_::<half::f16>(tensor.to_vec1()?)), DType::BF16 => Ok(convert_back_::<half::bf16>(tensor.to_vec1()?)), DType::F32 => Ok(convert_back_::<f32>(tensor.to_vec1()?)), DType::F64 => Ok(convert_back_::<f64>(tensor.to_vec1()?)), } } pub fn load<P: AsRef<Path>>(filename: P, device: &Device) -> Result<HashMap<String, Tensor>> { let data = std::fs::read(filename.as_ref())?; load_buffer(&data[..], device) } pub fn load_buffer(data: &[u8], device: &Device) -> Result<HashMap<String, Tensor>> { let st = safetensors::SafeTensors::deserialize(data)?; st.tensors() .into_iter() .map(|(name, view)| Ok((name, view.load(device)?))) .collect() } pub fn save<K: AsRef<str> + Ord + std::fmt::Display, P: AsRef<Path>>( tensors: &HashMap<K, Tensor>, filename: P, ) -> Result<()> { Ok(st::serialize_to_file(tensors, &None, filename.as_ref())?) } #[derive(yoke::Yokeable)] struct SafeTensors_<'a>(SafeTensors<'a>); pub struct MmapedSafetensors { safetensors: Vec<yoke::Yoke<SafeTensors_<'static>, memmap2::Mmap>>, routing: Option<HashMap<String, usize>>, } impl MmapedSafetensors { /// Creates a wrapper around a memory mapped file and deserialize the safetensors header. /// /// # Safety /// /// The unsafe is inherited from [`memmap2::MmapOptions`]. pub unsafe fn new<P: AsRef<Path>>(p: P) -> Result<Self> { let p = p.as_ref(); let file = std::fs::File::open(p).map_err(|e| Error::from(e).with_path(p))?; let file = memmap2::MmapOptions::new() .map(&file) .map_err(|e| Error::from(e).with_path(p))?; let safetensors = yoke::Yoke::<SafeTensors_<'static>, memmap2::Mmap>::try_attach_to_cart( file, |data: &[u8]| { let st = safetensors::SafeTensors::deserialize(data) .map_err(|e| Error::from(e).with_path(p))?; Ok::<_, Error>(SafeTensors_(st)) }, )?; Ok(Self { safetensors: vec![safetensors], routing: None, }) } /// Creates a wrapper around multiple memory mapped file and deserialize the safetensors headers. /// /// If a tensor name appears in multiple files, the last entry is returned. /// /// # Safety /// /// The unsafe is inherited from [`memmap2::MmapOptions`]. 
pub unsafe fn multi<P: AsRef<Path>>(paths: &[P]) -> Result<Self> { let mut routing = HashMap::new(); let mut safetensors = vec![]; for (index, p) in paths.iter().enumerate() { let p = p.as_ref(); let file = std::fs::File::open(p).map_err(|e| Error::from(e).with_path(p))?; let file = memmap2::MmapOptions::new() .map(&file) .map_err(|e| Error::from(e).with_path(p))?; let data = yoke::Yoke::<SafeTensors_<'static>, memmap2::Mmap>::try_attach_to_cart( file, |data: &[u8]| { let st = safetensors::SafeTensors::deserialize(data) .map_err(|e| Error::from(e).with_path(p))?; Ok::<_, Error>(SafeTensors_(st)) }, )?; for k in data.get().0.names() { routing.insert(k.to_string(), index); } safetensors.push(data) } Ok(Self { safetensors, routing: Some(routing), }) } pub fn load(&self, name: &str, dev: &Device) -> Result<Tensor> { self.get(name)?.load(dev) } pub fn tensors(&self) -> Vec<(String, st::TensorView<'_>)> { let mut tensors = vec![]; for safetensors in self.safetensors.iter() { tensors.push(safetensors.get().0.tensors()) } tensors.into_iter().flatten().collect() } pub fn get(&self, name: &str) -> Result<st::TensorView<'_>> { let index = match &self.routing { None => 0, Some(routing) => { let index = routing.get(name).ok_or_else(|| { Error::CannotFindTensor { path: name.to_string(), } .bt() })?; *index } }; Ok(self.safetensors[index].get().0.tensor(name)?) } } pub struct SliceSafetensors<'a> { safetensors: SafeTensors<'a>, } impl<'a> SliceSafetensors<'a> { /// Creates a wrapper around a binary buffer and deserialize the safetensors header. pub fn new(buffer: &'a [u8]) -> Result<Self> { let safetensors = safetensors::SafeTensors::deserialize(buffer)?; Ok(Self { safetensors }) } pub fn load(&self, name: &str, dev: &Device) -> Result<Tensor> { self.safetensors.tensor(name)?.load(dev) } pub fn tensors(&self) -> Vec<(String, st::TensorView<'_>)> { self.safetensors.tensors() } pub fn get(&self, name: &str) -> Result<st::TensorView<'_>> { Ok(self.safetensors.tensor(name)?) } } pub struct BufferedSafetensors { safetensors: yoke::Yoke<SafeTensors_<'static>, Vec<u8>>, } impl BufferedSafetensors { /// Creates a wrapper around a binary buffer and deserialize the safetensors header. pub fn new(buffer: Vec<u8>) -> Result<Self> { let safetensors = yoke::Yoke::<SafeTensors_<'static>, Vec<u8>>::try_attach_to_cart( buffer, |data: &[u8]| { let st = safetensors::SafeTensors::deserialize(data)?; Ok::<_, Error>(SafeTensors_(st)) }, )?; Ok(Self { safetensors }) } pub fn load(&self, name: &str, dev: &Device) -> Result<Tensor> { self.get(name)?.load(dev) } pub fn tensors(&self) -> Vec<(String, st::TensorView<'_>)> { self.safetensors.get().0.tensors() } pub fn get(&self, name: &str) -> Result<st::TensorView<'_>> { Ok(self.safetensors.get().0.tensor(name)?) } } pub struct MmapedFile { path: std::path::PathBuf, inner: memmap2::Mmap, } impl MmapedFile { /// Creates a wrapper around a memory mapped file from which you can retrieve /// tensors using [`MmapedFile::deserialize`] /// /// # Safety /// /// The unsafe is inherited from [`memmap2::MmapOptions`]. 
pub unsafe fn new<P: AsRef<Path>>(p: P) -> Result<Self> { let p = p.as_ref(); let file = std::fs::File::open(p).map_err(|e| Error::from(e).with_path(p))?; let inner = memmap2::MmapOptions::new() .map(&file) .map_err(|e| Error::from(e).with_path(p))?; Ok(Self { inner, path: p.to_path_buf(), }) } pub fn deserialize(&self) -> Result<SafeTensors<'_>> { let st = safetensors::SafeTensors::deserialize(&self.inner) .map_err(|e| Error::from(e).with_path(&self.path))?; Ok(st) } } #[cfg(test)] mod tests { use super::*; use std::collections::HashMap; #[test] fn save_single_tensor() { let t = Tensor::zeros((2, 2), DType::F32, &Device::Cpu).unwrap(); t.save_safetensors("t", "t.safetensors").unwrap(); let bytes = std::fs::read("t.safetensors").unwrap(); assert_eq!(bytes, b"@\0\0\0\0\0\0\0{\"t\":{\"dtype\":\"F32\",\"shape\":[2,2],\"data_offsets\":[0,16]}} \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"); std::fs::remove_file("t.safetensors").unwrap(); } #[test] fn save_load_multiple_tensors() { let t = Tensor::zeros((2, 2), DType::F32, &Device::Cpu).unwrap(); let u = Tensor::zeros((1, 2), DType::F32, &Device::Cpu).unwrap(); let map: HashMap<_, _> = [("t", t), ("u", u)].into_iter().collect(); save(&map, "multi.safetensors").unwrap(); let weights = load("multi.safetensors", &Device::Cpu).unwrap(); assert_eq!(weights.get("t").unwrap().dims(), &[2, 2]); assert_eq!(weights.get("u").unwrap().dims(), &[1, 2]); let bytes = std::fs::read("multi.safetensors").unwrap(); assert_eq!(bytes, b"x\0\0\0\0\0\0\0{\"t\":{\"dtype\":\"F32\",\"shape\":[2,2],\"data_offsets\":[0,16]},\"u\":{\"dtype\":\"F32\",\"shape\":[1,2],\"data_offsets\":[16,24]}} \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"); std::fs::remove_file("multi.safetensors").unwrap(); } }
candle/candle-core/src/safetensors.rs/0
{ "file_path": "candle/candle-core/src/safetensors.rs", "repo_id": "candle", "token_count": 8064 }
19
#![allow(clippy::approx_constant)] use anyhow::{Context, Result}; use candle_core::{test_device, test_utils, Device, Shape, Tensor, Var}; fn simple_grad(device: &Device) -> Result<()> { let x = Var::new(&[3f32, 1., 4.], device)?; let x = x.as_tensor(); let y = (((x * x)? + x * 5f64)? + 4f64)?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(x.to_vec1::<f32>()?, [3., 1., 4.]); // y = x^2 + 5.x + 4 assert_eq!(y.to_vec1::<f32>()?, [28., 10., 40.]); // dy/dx = 2.x + 5 assert_eq!(grad_x.to_vec1::<f32>()?, [11., 7., 13.]); Ok(()) } fn sum_grad(device: &Device) -> Result<()> { let x = Var::new(&[3f32, 1., 4.], device)?; let x = x.as_tensor(); let y = (x.sqr()?.sum_keepdim(0)? * 2.)?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(y.to_vec1::<f32>()?, [52.]); // y = 2.x^2 so dy/dx = 4.x assert_eq!(grad_x.to_vec1::<f32>()?, &[12., 4., 16.]); // Same test as before but squeezing on the last dimension. let y = (x.sqr()?.sum_keepdim(0)? * 2.)?.squeeze(0)?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(y.to_scalar::<f32>()?, 52.); // y = 2.x^2 so dy/dx = 4.x assert_eq!(grad_x.to_vec1::<f32>()?, &[12., 4., 16.]); Ok(()) } fn matmul_grad(device: &Device) -> Result<()> { let data: Vec<_> = (0..12).map(|i| i as f32).collect(); let x = Var::from_slice(&data, (2, 2, 3), device)?; let data: Vec<_> = (0..12).map(|i| i as f32).collect(); let y = Var::from_slice(&data, (2, 3, 2), device)?; let c = x.matmul(&y)?; let grads = c.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; let grad_y = grads.get(&y).context("no grad for y")?; assert_eq!(grad_x.shape(), &Shape::from((2, 2, 3))); assert_eq!(grad_y.shape(), &Shape::from((2, 3, 2))); assert_eq!( &*grad_x.to_vec3::<f32>()?, &[ [[1., 5., 9.], [1., 5., 9.]], [[13., 17., 21.], [13., 17., 21.]] ] ); assert_eq!( &*grad_y.to_vec3::<f32>()?, &[ [[3., 3.], [5., 5.], [7., 7.]], [[15., 15.], [17., 17.], [19., 19.]] ] ); Ok(()) } // The simplest gradient descent, using scalar variable. fn grad_descent(device: &Device) -> Result<()> { let x = Var::new(0f32, device)?; let learning_rate = 0.1; for _step in 0..100 { let xt = x.as_tensor(); let c = ((xt - 4.2)? * (xt - 4.2)?)?; let grads = c.backward()?; let x_grad = grads.get(&x).context("no grad for x")?; x.set(&(xt - x_grad * learning_rate)?)? } assert_eq!(x.to_scalar::<f32>()?, 4.199999); Ok(()) } fn unary_grad(device: &Device) -> Result<()> { let x = Var::new(&[3f32, 1., 4., 0.15], device)?; let x = x.as_tensor(); let y = (x.log()? 
+ 1.)?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!( test_utils::to_vec1_round(&y, 4)?, [2.0986, 1.0, 2.3863, -0.8971] ); assert_eq!( test_utils::to_vec1_round(grad_x, 4)?, [0.3333, 1.0, 0.25, 6.6667] ); let y = x.exp()?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!( test_utils::to_vec1_round(&y, 4)?, [20.0855, 2.7183, 54.5982, 1.1618] ); assert_eq!( test_utils::to_vec1_round(grad_x, 4)?, [20.0855, 2.7183, 54.5982, 1.1618] ); let y = x.exp()?.sqr()?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!( test_utils::to_vec1_round(&y, 3)?, [403.429, 7.389, 2980.958, 1.35] ); // exp(x)^2 = exp(2*x) assert_eq!( test_utils::to_vec1_round(grad_x, 2)?, [806.86, 14.78, 5961.92, 2.7] ); let y = x.sin()?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!( test_utils::to_vec1_round(&y, 4)?, [0.1411, 0.8415, -0.7568, 0.1494], ); assert_eq!( test_utils::to_vec1_round(grad_x, 4)?, [-0.99, 0.5403, -0.6536, 0.9888], ); let y = x.cos()?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!( test_utils::to_vec1_round(&y, 4)?, [-0.99, 0.5403, -0.6536, 0.9888], ); assert_eq!( test_utils::to_vec1_round(grad_x, 4)?, [-0.1411, -0.8415, 0.7568, -0.1494], ); let y = x.sqr()?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(y.to_vec1::<f32>()?, [9.0, 1.0, 16.0, 0.0225]); assert_eq!(grad_x.to_vec1::<f32>()?, [6.0, 2.0, 8.0, 0.3]); let y = x.sqr()?.sqrt()?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(y.to_vec1::<f32>()?, [3.0, 1.0, 4.0, 0.15]); assert_eq!(test_utils::to_vec1_round(grad_x, 4)?, [1.0, 1.0, 1.0, 1.0]); let y = x.neg()?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(y.to_vec1::<f32>()?, [-3.0, -1.0, -4.0, -0.15]); assert_eq!(grad_x.to_vec1::<f32>()?, [-1.0, -1.0, -1.0, -1.0]); let y = x.affine(0.2, 1.)?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(y.to_vec1::<f32>()?, [1.6, 1.2, 1.8, 1.03]); assert_eq!(grad_x.to_vec1::<f32>()?, [0.2, 0.2, 0.2, 0.2]); let y = Tensor::new(1f32, device)?.broadcast_div(x)?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!( test_utils::to_vec1_round(&y, 4)?, [0.3333, 1.0, 0.25, 6.6667] ); assert_eq!( grad_x.to_vec1::<f32>()?, [-0.11111111, -1.0, -0.0625, -44.444443], ); let y = x.broadcast_div(&Tensor::new(0.5f32, device)?)?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(y.to_vec1::<f32>()?, [6., 2., 8., 0.3]); assert_eq!(grad_x.to_vec1::<f32>()?, [2., 2., 2., 2.]); let x = Var::new(&[3f32, 1., 4., 0.15], device)?; let y = x.powf(2.5)?; let grads = y.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!(test_utils::to_vec1_round(&y, 2)?, [15.59, 1.0, 32.0, 0.01]); assert_eq!( test_utils::to_vec1_round(grad_x, 2)?, [12.99, 2.5, 20.0, 0.15] ); let y = x.tanh()?; let grads = y.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!(test_utils::to_vec1_round(&y, 2)?, [1.0, 0.76, 1.0, 0.15]); assert_eq!( test_utils::to_vec1_round(grad_x, 2)?, [0.01, 0.42, 0.0, 0.98], ); // testing compared to pytorch nn.GELU(approximate = 'tanh') let y = x.gelu()?; let grads = y.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!( 
test_utils::to_vec1_round(&y, 4)?, [2.9964, 0.8412, 3.9999, 0.0839] ); assert_eq!( test_utils::to_vec1_round(grad_x, 4)?, [1.0116, 1.0830, 1.0003, 0.6188], ); // Testing compared to pytorch torch.erf // // import torch // x = torch.tensor([3.0, 1.0, 4.0, 0.15], requires_grad=True) // y = x.erf() // print(y) // loss = y.sum() // loss.backward() // print(x.grad) let y = x.erf()?; let grads = y.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!(test_utils::to_vec1_round(&y, 4)?, [1.0, 0.8427, 1.0, 0.168]); assert_eq!( test_utils::to_vec1_round(grad_x, 4)?, [0.0001, 0.4151, 0.0, 1.1033], ); // Testing compared to pytorch nn.GELU(approximate = 'none') // // import torch // import torch.nn.functional as F // x = torch.tensor([3.0, 1.0, 4.0, 0.15], requires_grad=True) // y = F.gelu(x, approximate='none') // print(y) // loss = y.sum() // loss.backward() // print(x.grad) let y = x.gelu_erf()?; let grads = y.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!( test_utils::to_vec1_round(&y, 4)?, [2.9960, 0.8413, 3.9999, 0.0839] ); assert_eq!( test_utils::to_vec1_round(grad_x, 4)?, [1.0119, 1.0833, 1.0005, 0.6188], ); // Testing compared to pytorch elu // // import torch // import torch.nn.functional as F // x = torch.tensor([-1.0, 0.0, -2.0, 3.0], requires_grad=True) // y = F.elu(x, alpha=2.0) // print(y) // loss = y.min // loss = y.sum() // loss.backward() // print(x.grad) let elu_x = Var::new(&[-1.0f32, 0., -2., 3.], device)?; let y = elu_x.elu(2.)?; let grads = y.backward()?; let grad_x = grads.get(&elu_x).context("no grad for x")?; assert_eq!( test_utils::to_vec1_round(&y, 4)?, [-1.2642, 0.0000, -1.7293, 3.0000] ); assert_eq!( test_utils::to_vec1_round(grad_x, 4)?, [0.7358, 2.0000, 0.2707, 1.0000] ); // testing compared to pytorch nn.Silu() let y = x.silu()?; let grads = y.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!( test_utils::to_vec1_round(&y, 4)?, [2.8577, 0.7311, 3.9281, 0.0806] ); assert_eq!( test_utils::to_vec1_round(grad_x, 4)?, [1.0881, 0.9277, 1.0527, 0.5747], ); if device.is_cpu() { let x = Var::new(&[[[1f32, 2., 3.], [4., 5., 6.], [7., 8., 9.]]], device)?; let y = x.interpolate1d(12)?.reshape(36)?; let z = Tensor::new( &[ 1_f32, 02., 03., 04., 05., 06., 07., 08., 09., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., ], device, )?; let loss = y.unsqueeze(1)?.transpose(0, 1)?.matmul(&z.unsqueeze(1)?)?; let grads = loss.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!( test_utils::to_vec3_round(grad_x, 4)?, [[[10_f32, 26., 42.], [58., 74., 90.], [106., 122., 138.]]] ); } // manually checked: see comments let x = Var::new(&[[[[1f32, 2., 3.], [4., 5., 6.], [7., 8., 9.]]]], device)?; let y = x.interpolate2d(6, 6)?.reshape(36)?; let z = Tensor::new( &[ 1_f32, 02., 03., 04., 05., 06., 07., 08., 09., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., ], device, )?; // gradient should be // row 1 // 1+2+7+8 = 18 // 3+4+9+10 = 26 // 5+6+11+12 = 34 // row 2 // 13+14+19+20 = 66 // 15+16+21+22 = 74 // 17+18+23+24 = 82 // row 3 // 25+26+31+32 = 114 // 27+28+33+34 = 122 // 29+30+35+36 = 130 let loss = y.unsqueeze(1)?.transpose(0, 1)?.matmul(&z.unsqueeze(1)?)?; let grads = loss.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!( test_utils::to_vec2_round(&grad_x.flatten(0, 2)?, 4)?, [[18_f32, 
26., 34.], [66., 74., 82.], [114., 122., 130.]] ); // manually checked: see comments let x = Var::new(&[[[[1f32, 2.], [4., 5.]]]], device)?; let y = x.interpolate2d(6, 6)?.reshape(36)?; let z = Tensor::new( &[ 1_f32, 02., 03., 04., 05., 06., 07., 08., 09., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., ], device, )?; // gradient should be // row 1 // 1+2+3+7+8+9+13+14+15 = 72 // 4+5+6+10+11+12+16+17+18 = 99 // row 2 // 19+20+21+25+26+27+31+32+33 = 234 // 22+23+24+28+29+30+34+35+36 = 243 let loss = y.unsqueeze(1)?.transpose(0, 1)?.matmul(&z.unsqueeze(1)?)?; let grads = loss.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!( test_utils::to_vec2_round(&grad_x.flatten(0, 2)?, 4)?, [[72_f32, 99.], [234., 261.]] ); // manually checked: see comments let x = Var::new(&[[[[1f32, 2.], [4., 5.]], [[6f32, 7.], [8., 9.]]]], device)?; let y = x.interpolate2d(4, 4)?.reshape(32)?; #[rustfmt::skip] let z = Tensor::new( &[ 1_f32, 02., 03., 04., 05., 06., 07., 08., 09., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32. ], device, )?; // gradient should be // m1r1 // 1+2+5+6=14 // 3+4+7+8=22 // m1r2 // 9+10+13+14=46 // 11+12+15+16=54 // m2r1 // 17+18+21+22=78 // 19+20+23+24=86 // m2r2 // 25+26+29+30=110 // 27+28+31+32=118 let loss = y.unsqueeze(1)?.transpose(0, 1)?.matmul(&z.unsqueeze(1)?)?; let grads = loss.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!( test_utils::to_vec3_round(&grad_x.flatten(0, 1)?, 4)?, [[[14_f32, 22.], [46., 54.]], [[78., 86.], [110., 118.]]] ); // manually checked: see comments let x = Var::new( &[[[[1f32, 2.], [4., 5.]]], [[[6f32, 7.], [8., 9.]]]], device, )?; let y = x.interpolate2d(4, 4)?.reshape(32)?; #[rustfmt::skip] let z = Tensor::new( &[ 1_f32, 02., 03., 04., 05., 06., 07., 08., 09., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32. ], device, )?; // gradient should be // m1r1 // 1+2+5+6=14 // 3+4+7+8=22 // m1r2 // 9+10+13+14=46 // 11+12+15+16=54 // m2r1 // 17+18+21+22=78 // 19+20+23+24=86 // m2r2 // 25+26+29+30=110 // 27+28+31+32=118 let loss = y.unsqueeze(1)?.transpose(0, 1)?.matmul(&z.unsqueeze(1)?)?; let grads = loss.backward()?; let grad_x = grads.get(&x).context("no grad for x")?; assert_eq!( test_utils::to_vec3_round(&grad_x.flatten(0, 1)?, 4)?, [[[14_f32, 22.], [46., 54.]], [[78., 86.], [110., 118.]]] ); Ok(()) } fn binary_grad(device: &Device) -> Result<()> { let x = Var::new(&[3f32, 1., -4., -1.], device)?; let x = x.as_tensor(); // leaky relu let y = x.maximum(&(x * 0.1)?)?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(x.to_vec1::<f32>()?, [3., 1., -4., -1.]); assert_eq!(y.to_vec1::<f32>()?, [3., 1., -0.4, -0.1]); assert_eq!(grad_x.to_vec1::<f32>()?, [1., 1., 0.1, 0.1]); let y = x.minimum(&(x * 0.1)?)?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(y.to_vec1::<f32>()?, [0.3, 0.1, -4., -1.]); assert_eq!(grad_x.to_vec1::<f32>()?, [0.1, 0.1, 1., 1.]); // This one is easy to mess up, we want the gradient to be one as it is the identity function. 
let y = x.minimum(x)?; let grads = y.backward()?; let grad_x = grads.get(x).context("no grad for x")?; assert_eq!(y.to_vec1::<f32>()?, [3., 1., -4., -1.]); assert_eq!(grad_x.to_vec1::<f32>()?, [1., 1., 1., 1.]); let x_var = Var::new(&[3f32, 1., -4., -1., 5., 9.], device)?; let x = x_var.as_tensor(); let y_var = Var::new(&[2f32, 7., 1.], device)?; let y = y_var.as_tensor(); let ss = x .reshape((2, 3))? .slice_scatter0(&y.reshape((1, 3))?, 1)? .sqr()?; let grads = ss.backward()?; let grad_x = grads.get(x).context("no grad for x")?; let grad_y = grads.get(y).context("no grad for y")?; assert_eq!(ss.to_vec2::<f32>()?, [[9., 1., 16.], [4., 49., 1.]]); assert_eq!(grad_x.to_vec1::<f32>()?, [6.0, 2.0, -8.0, 0.0, 0.0, 0.0]); assert_eq!(grad_y.to_vec1::<f32>()?, [4.0, 14.0, 2.0]); Ok(()) } test_device!( simple_grad, simple_grad_cpu, simple_grad_gpu, simple_grad_metal ); test_device!(sum_grad, sum_grad_cpu, sum_grad_gpu, sum_grad_metal); test_device!( matmul_grad, matmul_grad_cpu, matmul_grad_gpu, matmul_grad_metal ); test_device!( grad_descent, grad_descent_cpu, grad_descent_gpu, grad_descent_metal ); test_device!(unary_grad, unary_grad_cpu, unary_grad_gpu, unary_grad_metal); test_device!( binary_grad, binary_grad_cpu, binary_grad_gpu, binary_grad_metal );
candle/candle-core/tests/grad_tests.rs/0
{ "file_path": "candle/candle-core/tests/grad_tests.rs", "repo_id": "candle", "token_count": 9105 }
20
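All of the gradient tests above follow one pattern: wrap the input in a `Var`, build an expression from its tensor view, call `backward()`, and look the gradient up in the returned store. A minimal sketch of that pattern, assuming the `candle_core` and `anyhow` crates that the tests themselves use:

```rust
use candle_core::{Device, Var};

fn main() -> anyhow::Result<()> {
    // y = x^2 element-wise, so the gradient of the (implicitly summed) output is dy/dx = 2x.
    let x = Var::new(&[3f32, 1., 4.], &Device::Cpu)?;
    let y = (x.as_tensor() * x.as_tensor())?;
    let grads = y.backward()?;
    let grad_x = grads.get(&x).expect("no grad for x");
    println!("{:?}", grad_x.to_vec1::<f32>()?); // [6.0, 2.0, 8.0]
    Ok(())
}
```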
# candle-datasets
candle/candle-datasets/README.md/0
{ "file_path": "candle/candle-datasets/README.md", "repo_id": "candle", "token_count": 7 }
21
# candle-bert Bert is a general large language model. In this example it can be used for two different tasks: - Compute sentence embeddings for a prompt. - Compute similarities between a set of sentences. ## Sentence embeddings Bert is used to compute the sentence embeddings for a prompt. The model weights are downloaded from the hub on the first run. ```bash cargo run --example bert --release -- --prompt "Here is a test sentence" > [[[ 0.0798, -0.0665, -0.0247, ..., -0.1082, -0.1000, -0.2751], > [ 0.4218, 0.2690, 0.2740, ..., 0.3889, 1.3503, 0.9908], > [ 0.0466, 0.3041, -0.1143, ..., 0.4427, 0.6926, -0.1515], > ... > [ 0.3396, 0.4320, -0.4408, ..., 0.9212, 0.2331, -0.6777], > [ 0.2789, 0.7539, 0.4306, ..., -0.0095, 0.3375, -1.7529], > [ 0.6737, 0.7882, 0.0548, ..., 0.1836, 0.7299, -0.6617]]] > Tensor[[1, 7, 384], f32] ``` ### Custom models You can specify different models, such as BGE, with the `--model-id` flag: ```bash cargo run --example bert --release -- \ --model-id BAAI/bge-large-zh-v1.5 \ --prompt "Here is a test sentence" Loaded and encoded 435.70775ms [[[ 3.0944e-1, -7.8455e-5, -1.2768e0, ..., 1.3755e-2, -3.2371e-1, 2.3819e-1], [-2.8506e-1, 1.9953e-1, -1.3076e0, ..., 6.9819e-2, 1.0833e-2, -1.1512e0], [ 3.9892e-1, 2.0000e-1, -9.3178e-1, ..., -4.1393e-1, -4.9644e-2, -3.3786e-1], ... [ 6.0345e-1, 3.5744e-1, -1.2672e0, ..., -6.9165e-1, -3.4973e-3, -8.4214e-1], [ 3.9218e-1, -3.2735e-1, -1.3123e0, ..., -4.9318e-1, -5.1334e-1, -3.6391e-1], [ 3.0978e-1, 2.5662e-4, -1.2773e0, ..., 1.3357e-2, -3.2390e-1, 2.3858e-1]]] Tensor[[1, 9, 1024], f32] Took 176.744667ms ``` ### Gelu approximation You can get a speedup by using an approximation of the gelu activation, with a small loss of precision, by passing the `--approximate-gelu` flag: ```bash $ cargo run --example bert --release -- \ --model-id BAAI/bge-large-zh-v1.5 \ --prompt "Here is a test sentence" \ --approximate-gelu Loaded and encoded 244.388042ms [[[ 3.1048e-1, -6.0339e-4, -1.2758e0, ..., 1.3718e-2, -3.2362e-1, 2.3775e-1], [-2.8354e-1, 1.9984e-1, -1.3077e0, ..., 6.9390e-2, 9.9681e-3, -1.1531e0], [ 3.9947e-1, 1.9917e-1, -9.3178e-1, ..., -4.1301e-1, -5.0719e-2, -3.3955e-1], ... [ 6.0499e-1, 3.5664e-1, -1.2642e0, ..., -6.9134e-1, -3.4581e-3, -8.4471e-1], [ 3.9311e-1, -3.2812e-1, -1.3105e0, ..., -4.9291e-1, -5.1270e-1, -3.6543e-1], [ 3.1082e-1, -2.6737e-4, -1.2762e0, ..., 1.3319e-2, -3.2381e-1, 2.3815e-1]]] Tensor[[1, 9, 1024], f32] Took 116.840791ms ``` ## Similarities In this example, Bert is used to compute the sentence embeddings for a set of sentences (hardcoded in the examples). Then cosine similarities are computed for each sentence pair and they are reported by decreasing values, hence the first reported pair contains the two sentences that have the highest similarity score. The sentence embeddings are computed using average pooling through all the sentence tokens, including some potential padding. ```bash cargo run --example bert --release > score: 0.85 'The new movie is awesome' 'The new movie is so great' > score: 0.61 'The cat sits outside' 'The cat plays in the garden' > score: 0.52 'I love pasta' 'Do you like pizza?' > score: 0.23 'The new movie is awesome' 'Do you like pizza?' > score: 0.22 'I love pasta' 'The new movie is awesome' ```
candle/candle-examples/examples/bert/README.md/0
{ "file_path": "candle/candle-examples/examples/bert/README.md", "repo_id": "candle", "token_count": 1564 }
22
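In the similarity part of the bert example above, sentence pairs are ranked by the cosine similarity of their mean-pooled embeddings. A plain-Rust sketch of that scoring step — illustrative only, not the example's actual code, and with made-up 3-dimensional vectors standing in for the real 384-dimensional embeddings:

```rust
// Cosine similarity: dot product divided by the product of the L2 norms.
fn cosine_similarity(a: &[f32], b: &[f32]) -> f32 {
    let dot: f32 = a.iter().zip(b).map(|(x, y)| x * y).sum();
    let norm_a = a.iter().map(|x| x * x).sum::<f32>().sqrt();
    let norm_b = b.iter().map(|x| x * x).sum::<f32>().sqrt();
    dot / (norm_a * norm_b)
}

fn main() {
    // Hypothetical sentence embeddings.
    let e1 = [0.1f32, 0.3, 0.5];
    let e2 = [0.2f32, 0.2, 0.6];
    println!("score: {:.2}", cosine_similarity(&e1, &e2));
}
```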
/*
 * Adapted from
 * https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/reduce_kernel_utils.cuh
 * Copyright (c) 2023, The vLLM team.
 * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

template <typename T>
__inline__ __device__ T warpReduceSum(T val) {
#pragma unroll
  for (int mask = 16; mask > 0; mask >>= 1)
    val += __shfl_xor_sync(0xffffffff, val, mask, 32);
  return val;
}

/* Calculate the sum of all elements in a block */
template <typename T>
__inline__ __device__ T blockReduceSum(T val) {
  static __shared__ T shared[32];
  int lane = threadIdx.x & 0x1f;
  int wid = threadIdx.x >> 5;

  val = warpReduceSum<T>(val);

  if (lane == 0) shared[wid] = val;

  __syncthreads();

  // Use blockDim.x / 32.f rather than blockDim.x >> 5 so that block sizes
  // that are not a multiple of 32 still count their last, partial warp.
  val = (threadIdx.x < (blockDim.x / 32.f)) ? shared[lane] : (T)(0.0f);
  val = warpReduceSum<T>(val);
  return val;
}

candle/candle-examples/examples/custom-ops/kernels/reduction_utils.cuh/0
{ "file_path": "candle/candle-examples/examples/custom-ops/kernels/reduction_utils.cuh", "repo_id": "candle", "token_count": 529 }
23
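The `warpReduceSum` in the entry above is a butterfly (XOR) reduction: at each of the log2(32) = 5 steps, every lane adds the value held by the lane whose index differs in the bit selected by the current mask (16, 8, 4, 2, 1), so after the last step every lane holds the sum of all 32 lanes. The following is a CPU-side sketch of that exchange pattern — a plain-Rust model of what `__shfl_xor_sync` does, not CUDA code:

```rust
fn main() {
    // One value per "lane" of a 32-thread warp.
    let mut lanes: Vec<f32> = (0..32).map(|i| i as f32).collect();
    let mut mask = 16;
    while mask > 0 {
        // Each lane reads its partner's current value, like __shfl_xor_sync.
        let before = lanes.clone();
        for lane in 0..32 {
            lanes[lane] += before[lane ^ mask];
        }
        mask >>= 1;
    }
    // Every lane now holds 0 + 1 + ... + 31 = 496.
    println!("{}", lanes[0]);
}
```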
# gte-Qwen1.5-7B-instruct

gte-Qwen1.5-7B-instruct is a variant of the GTE embedding model family.

- [Model card](https://huggingface.co/Alibaba-NLP/gte-Qwen1.5-7B-instruct) on the HuggingFace Hub.
- [Technical report](https://arxiv.org/abs/2308.03281) *Towards General Text Embeddings with Multi-stage Contrastive Learning*

## Running the example

Automatically download the model from the HuggingFace hub:

```bash
$ cargo run --example gte-qwen --release
```

or, load the model from a local directory:

```bash
cargo run --example gte-qwen --release --features cuda -- --local-repo /path/to/gte_Qwen1.5-7B-instruct/
```
candle/candle-examples/examples/gte-qwen/README.md/0
{ "file_path": "candle/candle-examples/examples/gte-qwen/README.md", "repo_id": "candle", "token_count": 229 }
24
# candle-mamba-minimal: minimal implementation of Mamba

This is based on [mamba-minimal](https://github.com/johnma2006/mamba-minimal).

Compared to the mamba example, this version can handle training but is much slower.

## Running the example

```bash
$ cargo run --example mamba-minimal --release -- --prompt "Mamba is the"
Mamba is the most popular and best-selling game in the world. It has been downloaded more than 1,000 times by over 1 million people worldwide since its release on March 18th 2016. The Mamba series of games are a collection that combines elements from all genres including action, adventure, strategy & puzzle games with some unique gameplay features such as stealth and survival. The game is also known for its innovative graphics and the ability to play in a variety of different modes like single player or multiplayer.
```
candle/candle-examples/examples/mamba-minimal/README.md/0
{ "file_path": "candle/candle-examples/examples/mamba-minimal/README.md", "repo_id": "candle", "token_count": 206 }
25
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Error as E; use clap::{Parser, ValueEnum}; use candle::{DType, Device, Tensor}; use candle_nn::{ops::softmax, VarBuilder}; use candle_transformers::models::mobileclip; use tokenizers::Tokenizer; #[derive(Clone, Copy, Debug, ValueEnum)] enum Which { S1, S2, } impl Which { fn model_name(&self) -> String { let name = match self { Self::S1 => "S1", Self::S2 => "S2", }; format!("apple/MobileCLIP-{}-OpenCLIP", name) } fn config(&self) -> mobileclip::MobileClipConfig { match self { Self::S1 => mobileclip::MobileClipConfig::s1(), Self::S2 => mobileclip::MobileClipConfig::s2(), } } } #[derive(Parser)] struct Args { #[arg(long, use_value_delimiter = true)] images: Option<Vec<String>>, #[arg(long)] cpu: bool, /// Use the pytorch weights rather than the safetensors ones #[arg(long)] use_pth: bool, #[arg(long, use_value_delimiter = true)] sequences: Option<Vec<String>>, #[arg(value_enum, long, default_value_t=Which::S1)] which: Which, } fn load_images<T: AsRef<std::path::Path>>( paths: &Vec<T>, image_size: usize, ) -> anyhow::Result<Tensor> { let mut images = vec![]; for path in paths { let tensor = candle_examples::imagenet::load_image_with_std_mean( path, image_size, &[0.0, 0.0, 0.0], &[1.0, 1.0, 1.0], )?; images.push(tensor); } let images = Tensor::stack(&images, 0)?; Ok(images) } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let model_name = args.which.model_name(); let api = hf_hub::api::sync::Api::new()?; let api = api.model(model_name); let model_file = if args.use_pth { api.get("open_clip_pytorch_model.bin")? } else { api.get("open_clip_model.safetensors")? }; let tokenizer = api.get("tokenizer.json")?; let tokenizer = Tokenizer::from_file(tokenizer).map_err(E::msg)?; let config = &args.which.config(); let device = candle_examples::device(args.cpu)?; let vec_imgs = match args.images { Some(imgs) => imgs, None => vec![ "candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg".to_string(), "candle-examples/examples/yolo-v8/assets/bike.jpg".to_string(), ], }; let images = load_images(&vec_imgs, config.image_size)?.to_device(&device)?; let vb = if args.use_pth { VarBuilder::from_pth(&model_file, DType::F32, &device)? } else { unsafe { VarBuilder::from_mmaped_safetensors(&[model_file.clone()], DType::F32, &device)? 
} }; let model = mobileclip::MobileClipModel::new(vb, config)?; let (input_ids, vec_seq) = tokenize_sequences(args.sequences, &tokenizer, &device)?; let (_logits_per_text, logits_per_image) = model.forward(&images, &input_ids)?; let softmax_image = softmax(&logits_per_image, 1)?; let softmax_image_vec = softmax_image.flatten_all()?.to_vec1::<f32>()?; println!("softmax_image_vec: {:?}", softmax_image_vec); let probability_vec = softmax_image_vec .iter() .map(|v| v * 100.0) .collect::<Vec<f32>>(); let probability_per_image = probability_vec.len() / vec_imgs.len(); for (i, img) in vec_imgs.iter().enumerate() { let start = i * probability_per_image; let end = start + probability_per_image; let prob = &probability_vec[start..end]; println!("\n\nResults for image: {}\n", img); for (i, p) in prob.iter().enumerate() { println!("Probability: {:.4}% Text: {}", p, vec_seq[i]); } } Ok(()) } pub fn tokenize_sequences( sequences: Option<Vec<String>>, tokenizer: &Tokenizer, device: &Device, ) -> anyhow::Result<(Tensor, Vec<String>)> { // let pad_id = *tokenizer // .get_vocab(true) // .get("<|endoftext|>") // .ok_or(E::msg("No pad token"))?; // The model does not work well if the text is padded using the <|endoftext|> token, using 0 // as the original OpenCLIP code. let pad_id = 0; let vec_seq = match sequences { Some(seq) => seq, None => vec![ "a cycling race".to_string(), "a photo of two cats".to_string(), "a robot holding a candle".to_string(), ], }; let mut tokens = vec![]; for seq in vec_seq.clone() { let encoding = tokenizer.encode(seq, true).map_err(E::msg)?; tokens.push(encoding.get_ids().to_vec()); } let max_len = tokens.iter().map(|v| v.len()).max().unwrap_or(0); // Pad the sequences to have the same length for token_vec in tokens.iter_mut() { let len_diff = max_len - token_vec.len(); if len_diff > 0 { token_vec.extend(vec![pad_id; len_diff]); } } let input_ids = Tensor::new(tokens, device)?; Ok((input_ids, vec_seq)) }
candle/candle-examples/examples/mobileclip/main.rs/0
{ "file_path": "candle/candle-examples/examples/mobileclip/main.rs", "repo_id": "candle", "token_count": 2305 }
26
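In `tokenize_sequences` of the mobileclip example above, every encoded prompt is right-padded with token id 0 so that all rows of the input tensor have the same length. A small standalone sketch of that padding step — a hypothetical helper for illustration, not part of the example:

```rust
// Right-pad every token sequence with `pad_id` up to the longest length.
fn pad_to_max(mut tokens: Vec<Vec<u32>>, pad_id: u32) -> Vec<Vec<u32>> {
    let max_len = tokens.iter().map(|t| t.len()).max().unwrap_or(0);
    for t in tokens.iter_mut() {
        t.resize(max_len, pad_id);
    }
    tokens
}

fn main() {
    let tokens = vec![vec![12, 7, 99], vec![5]];
    println!("{:?}", pad_to_max(tokens, 0)); // [[12, 7, 99], [5, 0, 0]]
}
```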
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Error as E; use clap::Parser; use candle::{DType, IndexOp, Tensor}; use candle_nn::VarBuilder; use candle_transformers::models::parler_tts::{Config, Model}; use tokenizers::Tokenizer; #[derive(Parser)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// Display the token for the specified prompt. #[arg(long)] verbose_prompt: bool, #[arg(long, default_value = "Hey, how are you doing today?")] prompt: String, #[arg( long, default_value = "A female speaker delivers a slightly expressive and animated speech with a moderate speed and pitch. The recording is of very high quality, with the speaker's voice sounding clear and very close up." )] description: String, /// The temperature used to generate samples. #[arg(long, default_value_t = 0.0)] temperature: f64, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 0)] seed: u64, #[arg(long, default_value_t = 5000)] sample_len: usize, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.0)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. #[arg(long, default_value_t = 64)] repeat_last_n: usize, #[arg(long)] model_id: Option<String>, #[arg(long)] revision: Option<String>, #[arg(long)] quantized: bool, /// Use f16 precision for all the computations rather than f32. #[arg(long)] f16: bool, #[arg(long)] model_file: Option<String>, #[arg(long)] tokenizer_file: Option<String>, #[arg(long)] config_file: Option<String>, #[arg(long, default_value_t = 512)] max_steps: usize, /// The output wav file. #[arg(long, default_value = "out.wav")] out_file: String, #[arg(long, default_value = "large-v1")] which: Which, } #[derive(Clone, Debug, Copy, PartialEq, Eq, clap::ValueEnum)] enum Which { #[value(name = "large-v1")] LargeV1, #[value(name = "mini-v1")] MiniV1, } fn main() -> anyhow::Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature, args.repeat_penalty, args.repeat_last_n ); let start = std::time::Instant::now(); let api = hf_hub::api::sync::Api::new()?; let model_id = match args.model_id { Some(model_id) => model_id.to_string(), None => match args.which { Which::LargeV1 => "parler-tts/parler-tts-large-v1".to_string(), Which::MiniV1 => "parler-tts/parler-tts-mini-v1".to_string(), }, }; let revision = match args.revision { Some(r) => r, None => "main".to_string(), }; let repo = api.repo(hf_hub::Repo::with_revision( model_id, hf_hub::RepoType::Model, revision, )); let model_files = match args.model_file { Some(m) => vec![m.into()], None => match args.which { Which::MiniV1 => vec![repo.get("model.safetensors")?], Which::LargeV1 => { candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")? 
} }, }; let config = match args.config_file { Some(m) => m.into(), None => repo.get("config.json")?, }; let tokenizer = match args.tokenizer_file { Some(m) => m.into(), None => repo.get("tokenizer.json")?, }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer).map_err(E::msg)?; let start = std::time::Instant::now(); let device = candle_examples::device(args.cpu)?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&model_files, DType::F32, &device)? }; let config: Config = serde_json::from_reader(std::fs::File::open(config)?)?; let mut model = Model::new(&config, vb)?; println!("loaded the model in {:?}", start.elapsed()); let description_tokens = tokenizer .encode(args.description, true) .map_err(E::msg)? .get_ids() .to_vec(); let description_tokens = Tensor::new(description_tokens, &device)?.unsqueeze(0)?; let prompt_tokens = tokenizer .encode(args.prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); let prompt_tokens = Tensor::new(prompt_tokens, &device)?.unsqueeze(0)?; let lp = candle_transformers::generation::LogitsProcessor::new( args.seed, Some(args.temperature), args.top_p, ); println!("starting generation..."); let codes = model.generate(&prompt_tokens, &description_tokens, lp, args.max_steps)?; println!("generated codes\n{codes}"); let codes = codes.to_dtype(DType::I64)?; codes.save_safetensors("codes", "out.safetensors")?; let codes = codes.unsqueeze(0)?; let pcm = model .audio_encoder .decode_codes(&codes.to_device(&device)?)?; println!("{pcm}"); let pcm = pcm.i((0, 0))?; let pcm = candle_examples::audio::normalize_loudness(&pcm, 24_000, true)?; let pcm = pcm.to_vec1::<f32>()?; let mut output = std::fs::File::create(&args.out_file)?; candle_examples::wav::write_pcm_as_wav(&mut output, &pcm, config.audio_encoder.sampling_rate)?; Ok(()) }
candle/candle-examples/examples/parler-tts/main.rs/0
{ "file_path": "candle/candle-examples/examples/parler-tts/main.rs", "repo_id": "candle", "token_count": 2678 }
27
import gymnasium as gym import numpy as np from collections import deque from PIL import Image from multiprocessing import Process, Pipe # atari_wrappers.py class NoopResetEnv(gym.Wrapper): def __init__(self, env, noop_max=30): """Sample initial states by taking random number of no-ops on reset. No-op is assumed to be action 0. """ gym.Wrapper.__init__(self, env) self.noop_max = noop_max self.override_num_noops = None assert env.unwrapped.get_action_meanings()[0] == 'NOOP' def reset(self): """ Do no-op action for a number of steps in [1, noop_max].""" self.env.reset() if self.override_num_noops is not None: noops = self.override_num_noops else: noops = self.unwrapped.np_random.integers(1, self.noop_max + 1) #pylint: disable=E1101 assert noops > 0 obs = None for _ in range(noops): obs, _, done, _ = self.env.step(0) if done: obs = self.env.reset() return obs class FireResetEnv(gym.Wrapper): def __init__(self, env): """Take action on reset for environments that are fixed until firing.""" gym.Wrapper.__init__(self, env) assert env.unwrapped.get_action_meanings()[1] == 'FIRE' assert len(env.unwrapped.get_action_meanings()) >= 3 def reset(self): self.env.reset() obs, _, done, _ = self.env.step(1) if done: self.env.reset() obs, _, done, _ = self.env.step(2) if done: self.env.reset() return obs class ImageSaver(gym.Wrapper): def __init__(self, env, img_path, rank): gym.Wrapper.__init__(self, env) self._cnt = 0 self._img_path = img_path self._rank = rank def step(self, action): step_result = self.env.step(action) obs, _, _, _ = step_result img = Image.fromarray(obs, 'RGB') img.save('%s/out%d-%05d.png' % (self._img_path, self._rank, self._cnt)) self._cnt += 1 return step_result class EpisodicLifeEnv(gym.Wrapper): def __init__(self, env): """Make end-of-life == end-of-episode, but only reset on true game over. Done by DeepMind for the DQN and co. since it helps value estimation. """ gym.Wrapper.__init__(self, env) self.lives = 0 self.was_real_done = True def step(self, action): obs, reward, done, info = self.env.step(action) self.was_real_done = done # check current lives, make loss of life terminal, # then update lives to handle bonus lives lives = self.env.unwrapped.ale.lives() if lives < self.lives and lives > 0: # for Qbert sometimes we stay in lives == 0 condition for a few frames # so its important to keep lives > 0, so that we only reset once # the environment advertises done. done = True self.lives = lives return obs, reward, done, info def reset(self): """Reset only when lives are exhausted. This way all states are still reachable even though lives are episodic, and the learner need not know about any of this behind-the-scenes. """ if self.was_real_done: obs = self.env.reset() else: # no-op step to advance from terminal/lost life state obs, _, _, _ = self.env.step(0) self.lives = self.env.unwrapped.ale.lives() return obs class MaxAndSkipEnv(gym.Wrapper): def __init__(self, env, skip=4): """Return only every `skip`-th frame""" gym.Wrapper.__init__(self, env) # most recent raw observations (for max pooling across time steps) self._obs_buffer = deque(maxlen=2) self._skip = skip def step(self, action): """Repeat action, sum reward, and max over last observations.""" total_reward = 0.0 done = None for _ in range(self._skip): obs, reward, done, info = self.env.step(action) self._obs_buffer.append(obs) total_reward += reward if done: break max_frame = np.max(np.stack(self._obs_buffer), axis=0) return max_frame, total_reward, done, info def reset(self): """Clear past frame buffer and init. 
to first obs. from inner env.""" self._obs_buffer.clear() obs = self.env.reset() self._obs_buffer.append(obs) return obs class ClipRewardEnv(gym.RewardWrapper): def reward(self, reward): """Bin reward to {+1, 0, -1} by its sign.""" return np.sign(reward) class WarpFrame(gym.ObservationWrapper): def __init__(self, env): """Warp frames to 84x84 as done in the Nature paper and later work.""" gym.ObservationWrapper.__init__(self, env) self.res = 84 self.observation_space = gym.spaces.Box(low=0, high=255, shape=(self.res, self.res, 1), dtype='uint8') def observation(self, obs): frame = np.dot(obs.astype('float32'), np.array([0.299, 0.587, 0.114], 'float32')) frame = np.array(Image.fromarray(frame).resize((self.res, self.res), resample=Image.BILINEAR), dtype=np.uint8) return frame.reshape((self.res, self.res, 1)) class FrameStack(gym.Wrapper): def __init__(self, env, k): """Buffer observations and stack across channels (last axis).""" gym.Wrapper.__init__(self, env) self.k = k self.frames = deque([], maxlen=k) shp = env.observation_space.shape assert shp[2] == 1 # can only stack 1-channel frames self.observation_space = gym.spaces.Box(low=0, high=255, shape=(shp[0], shp[1], k), dtype='uint8') def reset(self): """Clear buffer and re-fill by duplicating the first observation.""" ob = self.env.reset() for _ in range(self.k): self.frames.append(ob) return self.observation() def step(self, action): ob, reward, done, info = self.env.step(action) self.frames.append(ob) return self.observation(), reward, done, info def observation(self): assert len(self.frames) == self.k return np.concatenate(self.frames, axis=2) def wrap_deepmind(env, episode_life=True, clip_rewards=True): """Configure environment for DeepMind-style Atari. Note: this does not include frame stacking!""" assert 'NoFrameskip' in env.spec.id # required for DeepMind-style skip if episode_life: env = EpisodicLifeEnv(env) env = NoopResetEnv(env, noop_max=30) env = MaxAndSkipEnv(env, skip=4) if 'FIRE' in env.unwrapped.get_action_meanings(): env = FireResetEnv(env) env = WarpFrame(env) if clip_rewards: env = ClipRewardEnv(env) return env # envs.py def make_env(env_id, img_dir, seed, rank): def _thunk(): env = gym.make(env_id) env.reset(seed=(seed + rank)) if img_dir is not None: env = ImageSaver(env, img_dir, rank) env = wrap_deepmind(env) env = WrapPyTorch(env) return env return _thunk class WrapPyTorch(gym.ObservationWrapper): def __init__(self, env=None): super(WrapPyTorch, self).__init__(env) self.observation_space = gym.spaces.Box(0.0, 1.0, [1, 84, 84], dtype='float32') def observation(self, observation): return observation.transpose(2, 0, 1) # vecenv.py class VecEnv(object): """ Vectorized environment base class """ def step(self, vac): """ Apply sequence of actions to sequence of environments actions -> (observations, rewards, news) where 'news' is a boolean vector indicating whether each element is new. 
""" raise NotImplementedError def reset(self): """ Reset all environments """ raise NotImplementedError def close(self): pass # subproc_vec_env.py def worker(remote, env_fn_wrapper): env = env_fn_wrapper.x() while True: cmd, data = remote.recv() if cmd == 'step': ob, reward, done, info = env.step(data) if done: ob = env.reset() remote.send((ob, reward, done, info)) elif cmd == 'reset': ob = env.reset() remote.send(ob) elif cmd == 'close': remote.close() break elif cmd == 'get_spaces': remote.send((env.action_space, env.observation_space)) else: raise NotImplementedError class CloudpickleWrapper(object): """ Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle) """ def __init__(self, x): self.x = x def __getstate__(self): import cloudpickle return cloudpickle.dumps(self.x) def __setstate__(self, ob): import pickle self.x = pickle.loads(ob) class SubprocVecEnv(VecEnv): def __init__(self, env_fns): """ envs: list of gym environments to run in subprocesses """ nenvs = len(env_fns) self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) self.ps = [Process(target=worker, args=(work_remote, CloudpickleWrapper(env_fn))) for (work_remote, env_fn) in zip(self.work_remotes, env_fns)] for p in self.ps: p.start() self.remotes[0].send(('get_spaces', None)) self.action_space, self.observation_space = self.remotes[0].recv() def step(self, actions): for remote, action in zip(self.remotes, actions): remote.send(('step', action)) results = [remote.recv() for remote in self.remotes] obs, rews, dones, infos = zip(*results) return np.stack(obs), np.stack(rews), np.stack(dones), infos def reset(self): for remote in self.remotes: remote.send(('reset', None)) return np.stack([remote.recv() for remote in self.remotes]) def close(self): for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() @property def num_envs(self): return len(self.remotes) # Create the environment. def make(env_name, img_dir, num_processes): envs = SubprocVecEnv([ make_env(env_name, img_dir, 1337, i) for i in range(num_processes) ]) return envs
candle/candle-examples/examples/reinforcement-learning/atari_wrappers.py/0
{ "file_path": "candle/candle-examples/examples/reinforcement-learning/atari_wrappers.py", "repo_id": "candle", "token_count": 4740 }
28
# candle-segformer

- [HuggingFace Segformer Model Card][segformer]
- [`mit-b0` - An encoder only pretrained model][encoder]
- [`segformer-b0-finetuned-ade-512-512` - A fine tuned model for segmentation][ade512]

## How to run the example

If you want, you can use the example images from this [pull request][pr]; download them and supply the path to the image as an argument to the example.

```bash
# run the image classification task
cargo run --example segformer classify <path-to-image>
# run the segmentation task
cargo run --example segformer segment <path-to-image>
```

Example output for classification:

```text
classification logits [3.275261e-5, 0.0008562019, 0.0008868563, 0.9977506, 0.0002465068, 0.0002241473, 2.846596e-6]
label: hamburger
```

[pr]: https://github.com/huggingface/candle/pull/1617
[segformer]: https://huggingface.co/docs/transformers/model_doc/segformer
[encoder]: https://huggingface.co/nvidia/mit-b0
[ade512]: https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512
candle/candle-examples/examples/segformer/README.md/0
{ "file_path": "candle/candle-examples/examples/segformer/README.md", "repo_id": "candle", "token_count": 357 }
29
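In the segformer classification output above, the reported label is simply the class with the largest logit (index 3, `hamburger`). A small sketch of that argmax step in plain Rust; the logits are copied from the example output, while the label list is made up except for `hamburger` at index 3:

```rust
fn main() {
    let logits = [
        3.275261e-5f32, 0.0008562019, 0.0008868563, 0.9977506,
        0.0002465068, 0.0002241473, 2.846596e-6,
    ];
    // Hypothetical id-to-label mapping for illustration.
    let labels = ["class0", "class1", "class2", "hamburger", "class4", "class5", "class6"];
    let (best, _) = logits
        .iter()
        .enumerate()
        .max_by(|a, b| a.1.partial_cmp(b.1).unwrap())
        .unwrap();
    println!("label: {}", labels[best]);
}
```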
# candle-t5

## Encoder-decoder example:

```bash
$ cargo run --example t5 --release -- --model-id "t5-small" --prompt "translate to German: A beautiful candle." --decode
...
Eine schöne Kerze.
9 tokens generated (2.42 token/s)
```

Variants such as [flan-t5](https://huggingface.co/google/flan-t5-small), [flan-ul2](https://huggingface.co/google/flan-ul2) (with `--revision "refs/pr/25"`), and [Co-EdIT](https://huggingface.co/grammarly/coedit-large) are also supported.

## Translation with [MADLAD-400](https://arxiv.org/abs/2309.04662)

MADLAD-400 is a series of multilingual machine translation T5 models trained on 250 billion tokens covering over 450 languages using publicly available data. These models are competitive with significantly larger models.

```bash
cargo run --example t5 --release -- \
  --model-id "jbochi/madlad400-3b-mt" \
  --prompt "<2de> How are you, my friend?" \
  --decode --temperature 0
...
Wie geht es dir, mein Freund?
```

## Sentence embedding example

```bash
$ cargo run --example t5 --release -- --model-id "t5-small" --prompt "A beautiful candle."
...
[[[ 0.0515, -0.0541, -0.0761, ..., -0.0392, 0.1511, -0.0265],
 [-0.0974, 0.0998, -0.1659, ..., -0.2450, 0.1738, -0.0164],
 [ 0.0624, -0.1024, 0.0430, ..., -0.1388, 0.0564, -0.2962],
 [-0.0389, -0.1173, 0.0026, ..., 0.1064, -0.1065, 0.0990],
 [ 0.1300, 0.0027, -0.0326, ..., 0.0026, -0.0317, 0.0851]]]
Tensor[[1, 5, 512], f32]
Took 303.766583ms
```
candle/candle-examples/examples/t5/README.md/0
{ "file_path": "candle/candle-examples/examples/t5/README.md", "repo_id": "candle", "token_count": 608 }
30
/****************************************************************************** * Copyright (c) 2024, Tri Dao. ******************************************************************************/ #pragma once #include "philox.cuh" #include "utils.h" namespace flash { struct Dropout { const unsigned long long seed, offset; const uint8_t p_dropout_in_uint8_t; __forceinline__ __device__ Dropout(const unsigned long long seed, const unsigned long long offset, const uint8_t p_dropout_in_uint8_t, const int bid, const int hid, const int tid, const int nheads) : seed(seed) , offset(offset + (bid * nheads + hid) * 32 + tid % 32) , p_dropout_in_uint8_t(p_dropout_in_uint8_t) { } template <bool encode_dropout_in_sign_bit=false, typename Engine, typename Layout> __forceinline__ __device__ void apply_dropout(Tensor<Engine, Layout> &tensor_, int block_row_start, int block_col_start, int block_row_stride) { // convert shape from (4, MMA_M, MMA_N) to (8, MMA_M, MMA_N / 2) Tensor tensor = make_tensor(tensor_.data(), flash::convert_layout_acc_dropout(tensor_.layout())); using T = typename Engine::value_type; auto encode_dropout = [](bool keep, T val) { return keep ? val : (encode_dropout_in_sign_bit ? -val : T(0)); }; static_assert(decltype(size<2>(tensor))::value % 2 == 0); const uint16_t p_dropout_8bit_in_uint16_t = uint16_t(p_dropout_in_uint8_t); const uint32_t p_dropout_8bit_in_uint32_t = (uint32_t(p_dropout_8bit_in_uint16_t) << 16) | uint32_t(p_dropout_8bit_in_uint16_t); // if (cute::thread0()) { printf("threshold2 = 0x%x\n", p_dropout_8bit_in_uint32_t); } #pragma unroll for (int m = 0; m < size<1>(tensor); ++m, block_row_start += block_row_stride) { uint2 rowcol = make_uint2(block_row_start, block_col_start); #pragma unroll for (int n = 0; n < size<2>(tensor) / 2; ++n, ++rowcol.y) { // if (cute::thread(32, 0)) { printf("m = %d, n = %d, row = %d, col = %d\n", m, n, int(rowcol.x), int(rowcol.y));} uint4 random_uint4 = flash::philox(seed, reinterpret_cast<unsigned long long&>(rowcol), offset); // if (cute::thread0()) { printf("philox = %u, %d, %d, %d\n", random_uint4.x, random_uint4.y, random_uint4.z, random_uint4.w);} uint8_t (&rnd_8)[16] = reinterpret_cast<uint8_t (&)[16]>(random_uint4); // Special implementation for 16-bit types: we duplicate the threshold to the // low and high 16 bits of a 32-bit value, then use the f16x2 comparison instruction // to get a mask. The low 16 bits of the mask will be either 0xffff or 0x0000, // and the high 16 bits will be either 0xffff or 0x0000, depending on whether // the random value is less than the threshold. // We then do a bit-wise AND between the mask and the original value (in 32-bit). // We're exploiting the fact that floating point comparison is equivalent to integer // comparison, since we're comparing unsigned integers whose top 8-bits are zero. 
if (!encode_dropout_in_sign_bit && (std::is_same<T, cutlass::half_t>::value || std::is_same<T, cutlass::bfloat16_t>::value)) { uint16_t rnd_16[16]; #pragma unroll for (int i = 0; i < 16; i++) { rnd_16[i] = uint16_t(rnd_8[i]); } uint32_t (&rnd_32)[8] = reinterpret_cast<uint32_t (&)[8]>(rnd_16); #pragma unroll for (int j = 0; j < 2; j++) { Tensor tensor_uint32 = recast<uint32_t>(tensor(_, m, n * 2 + j)); // if (cute::thread0()) { printf("random = 0x%x, 0x%x, 0x%x, 0x%x\n", rnd_32[j * 4 + 0], rnd_32[j * 4 + 1], rnd_32[j * 4 + 2], rnd_32[j * 4 + 3]); } // if (cute::thread0()) { printf("tensor_uint32 = 0x%x, 0x%x, 0x%x, 0x%x\n", tensor_uint32(0), tensor_uint32(1), tensor_uint32(2), tensor_uint32(3)); } #pragma unroll for (int i = 0; i < 4; i++) { uint32_t mask; asm volatile("set.le.u32.f16x2 %0, %1, %2;\n" : "=r"(mask) : "r"(rnd_32[j * 4 + i]), "r"(p_dropout_8bit_in_uint32_t)); tensor_uint32(i) &= mask; } // if (cute::thread0()) { printf("tensor_uint32 = 0x%x, 0x%x, 0x%x, 0x%x\n", tensor_uint32(0), tensor_uint32(1), tensor_uint32(2), tensor_uint32(3)); } } } else { #pragma unroll for (int j = 0; j < 2; j++) { #pragma unroll for (int i = 0; i < 8; i++) { tensor(i, m, n * 2 + j) = encode_dropout(rnd_8[j * 8 + i] <= p_dropout_in_uint8_t, tensor(i, m, n * 2 + j)); } Tensor tensor_uint32 = recast<uint32_t>(tensor(_, m, n * 2 + j)); // if (cute::thread0()) { printf("tensor_uint32 = 0x%x, 0x%x, 0x%x, 0x%x\n", tensor_uint32(0), tensor_uint32(1), tensor_uint32(2), tensor_uint32(3)); } } } // // if ((threadIdx.x == 0) && (blockIdx.x == 0) && (blockIdx.y == 0)) { // // printf("n = %d, ph Philox: %u, %u, %u, %u\n", n, rnd_8.x, rnd_8.y, rnd_8.z, rnd_8.w); // // } } } } }; } // namespace flash
candle/candle-flash-attn/kernels/dropout.h/0
{ "file_path": "candle/candle-flash-attn/kernels/dropout.h", "repo_id": "candle", "token_count": 3021 }
31
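The flash-attention dropout kernel above draws 16 random bytes per Philox call and keeps an element when its byte is `<=` the 8-bit threshold `p_dropout_in_uint8_t`, so the keep probability is roughly `(threshold + 1) / 256`; the f16x2 path only vectorizes that comparison into a bit mask. A CPU-side sketch of the keep/drop rule in plain Rust — a model of the logic, not the CUDA code:

```rust
// Zero out values whose random byte is above the 8-bit keep threshold.
fn apply_dropout(values: &mut [f32], random_bytes: &[u8], threshold: u8) {
    for (v, r) in values.iter_mut().zip(random_bytes) {
        if *r > threshold {
            *v = 0.0;
        }
    }
}

fn main() {
    let mut values = vec![1.0f32; 8];
    // Hypothetical random bytes; the kernel gets these from Philox.
    let random_bytes = [12u8, 200, 77, 255, 3, 128, 90, 222];
    // threshold = 191 keeps roughly 75% of the elements.
    apply_dropout(&mut values, &random_bytes, 191);
    println!("{:?}", values);
}
```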
use core::ffi::{c_int, c_void};

extern "C" {
    pub(crate) fn run_mha(
        q_ptr: *const c_void,
        k_ptr: *const c_void,
        v_ptr: *const c_void,
        o_ptr: *const c_void,
        softmax_lse_ptr: *const c_void,
        alibi_slopes_ptr: *const c_void,
        cu_seqlens_q_ptr: *const i32,
        cu_seqlens_k_ptr: *const i32,
        q_batch_stride: u32,
        k_batch_stride: u32,
        v_batch_stride: u32,
        o_batch_stride: u32,
        alibi_slopes_batch_stride: u32,
        q_row_stride: u32,
        k_row_stride: u32,
        v_row_stride: u32,
        o_row_stride: u32,
        q_head_stride: u32,
        k_head_stride: u32,
        v_head_stride: u32,
        o_head_stride: u32,
        b: u32,
        h: u32,
        h_k: u32,
        d: u32,
        d_rounded: u32,
        softmax_scale: f32,
        seqlen_q: u32,
        seqlen_k: u32,
        seqlen_q_rounded: u32,
        seqlen_k_rounded: u32,
        is_bf16: c_int,
        is_causal: c_int,
        window_size_left: c_int,
        window_size_right: c_int,
    );
}
candle/candle-flash-attn/src/ffi.rs/0
{ "file_path": "candle/candle-flash-attn/src/ffi.rs", "repo_id": "candle", "token_count": 670 }
32
// Kernels adapted from llama.cpp ggml-cuda.cu // https://github.com/ggerganov/llama.cpp/blob/master/ggml-cuda.cu #include "cuda_fp16.h" #include "cuda_bf16.h" #include<stdint.h> #define GGML_UNUSED(x) (void)(x) #define GGML_CUDA_ASSUME(x) #ifdef GGML_QKK_64 #define QK_K 64 #define K_SCALE_SIZE 4 #else #define QK_K 256 #define K_SCALE_SIZE 12 #endif #undef GGML_CUDA_F16 #define GGML_CUDA_DMMV_X 32 #define CUDA_QUANTIZE_BLOCK_SIZE 256 #define CUDA_DEQUANTIZE_BLOCK_SIZE 256 #define K_QUANTS_PER_ITERATION 2 typedef uint16_t ggml_fp16_t; typedef float dfloat; // dequantize float typedef float2 dfloat2; typedef void (*dequantize_kernel_t)(const void * vx, const int ib, const int iqs, dfloat2 & v); static __device__ __forceinline__ float warp_reduce_sum(float x) { #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { x += __shfl_xor_sync(0xffffffff, x, mask, 32); } return x; } static __device__ __forceinline__ float warp_reduce_max(float x) { #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { x = fmaxf(x, __shfl_xor_sync(0xffffffff, x, mask, 32)); } return x; } static __device__ __forceinline__ int get_int_from_int8(const int8_t * x8, const int & i32) { const uint16_t * x16 = (const uint16_t *) (x8 + sizeof(int) * i32); // assume at least 2 byte alignment int x32 = 0; x32 |= x16[0] << 0; x32 |= x16[1] << 16; return x32; } static __device__ __forceinline__ int get_int_from_uint8(const uint8_t * x8, const int & i32) { const uint16_t * x16 = (const uint16_t *) (x8 + sizeof(int) * i32); // assume at least 2 byte alignment int x32 = 0; x32 |= x16[0] << 0; x32 |= x16[1] << 16; return x32; } static __device__ __forceinline__ int get_int_from_int8_aligned(const int8_t * x8, const int & i32) { return *((const int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment } static __device__ __forceinline__ int get_int_from_uint8_aligned(const uint8_t * x8, const int & i32) { return *((const int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment } #define WARP_SIZE 32 #define CUDART_HMAX 11070 // CUDA 11.7, min. ver. 
for which __hmax and __hmax2 are known to work (may be higher than needed) #define CC_PASCAL 600 #define MIN_CC_DP4A 610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products #define CC_VOLTA 700 #define CC_OFFSET_AMD 1000000 #define CC_RDNA1 (CC_OFFSET_AMD + 1010) #define CC_RDNA2 (CC_OFFSET_AMD + 1030) #define CC_RDNA3 (CC_OFFSET_AMD + 1100) #define MMQ_X_Q4_0_RDNA2 64 #define MMQ_Y_Q4_0_RDNA2 128 #define NWARPS_Q4_0_RDNA2 8 #define MMQ_X_Q4_0_RDNA1 64 #define MMQ_Y_Q4_0_RDNA1 64 #define NWARPS_Q4_0_RDNA1 8 #if defined(CUDA_USE_TENSOR_CORES) #define MMQ_X_Q4_0_AMPERE 4 #define MMQ_Y_Q4_0_AMPERE 32 #define NWARPS_Q4_0_AMPERE 4 #else #define MMQ_X_Q4_0_AMPERE 64 #define MMQ_Y_Q4_0_AMPERE 128 #define NWARPS_Q4_0_AMPERE 4 #endif #define MMQ_X_Q4_0_PASCAL 64 #define MMQ_Y_Q4_0_PASCAL 64 #define NWARPS_Q4_0_PASCAL 8 #define MMQ_X_Q4_1_RDNA2 64 #define MMQ_Y_Q4_1_RDNA2 128 #define NWARPS_Q4_1_RDNA2 8 #define MMQ_X_Q4_1_RDNA1 64 #define MMQ_Y_Q4_1_RDNA1 64 #define NWARPS_Q4_1_RDNA1 8 #if defined(CUDA_USE_TENSOR_CORES) #define MMQ_X_Q4_1_AMPERE 4 #define MMQ_Y_Q4_1_AMPERE 32 #define NWARPS_Q4_1_AMPERE 4 #else #define MMQ_X_Q4_1_AMPERE 64 #define MMQ_Y_Q4_1_AMPERE 128 #define NWARPS_Q4_1_AMPERE 4 #endif #define MMQ_X_Q4_1_PASCAL 64 #define MMQ_Y_Q4_1_PASCAL 64 #define NWARPS_Q4_1_PASCAL 8 #define MMQ_X_Q5_0_RDNA2 64 #define MMQ_Y_Q5_0_RDNA2 128 #define NWARPS_Q5_0_RDNA2 8 #define MMQ_X_Q5_0_RDNA1 64 #define MMQ_Y_Q5_0_RDNA1 64 #define NWARPS_Q5_0_RDNA1 8 #if defined(CUDA_USE_TENSOR_CORES) #define MMQ_X_Q5_0_AMPERE 4 #define MMQ_Y_Q5_0_AMPERE 32 #define NWARPS_Q5_0_AMPERE 4 #else #define MMQ_X_Q5_0_AMPERE 128 #define MMQ_Y_Q5_0_AMPERE 64 #define NWARPS_Q5_0_AMPERE 4 #endif #define MMQ_X_Q5_0_PASCAL 64 #define MMQ_Y_Q5_0_PASCAL 64 #define NWARPS_Q5_0_PASCAL 8 #define MMQ_X_Q5_1_RDNA2 64 #define MMQ_Y_Q5_1_RDNA2 128 #define NWARPS_Q5_1_RDNA2 8 #define MMQ_X_Q5_1_RDNA1 64 #define MMQ_Y_Q5_1_RDNA1 64 #define NWARPS_Q5_1_RDNA1 8 #if defined(CUDA_USE_TENSOR_CORES) #define MMQ_X_Q5_1_AMPERE 4 #define MMQ_Y_Q5_1_AMPERE 32 #define NWARPS_Q5_1_AMPERE 4 #else #define MMQ_X_Q5_1_AMPERE 128 #define MMQ_Y_Q5_1_AMPERE 64 #define NWARPS_Q5_1_AMPERE 4 #endif #define MMQ_X_Q5_1_PASCAL 64 #define MMQ_Y_Q5_1_PASCAL 64 #define NWARPS_Q5_1_PASCAL 8 #define MMQ_X_Q8_0_RDNA2 64 #define MMQ_Y_Q8_0_RDNA2 128 #define NWARPS_Q8_0_RDNA2 8 #define MMQ_X_Q8_0_RDNA1 64 #define MMQ_Y_Q8_0_RDNA1 64 #define NWARPS_Q8_0_RDNA1 8 #if defined(CUDA_USE_TENSOR_CORES) #define MMQ_X_Q8_0_AMPERE 4 #define MMQ_Y_Q8_0_AMPERE 32 #define NWARPS_Q8_0_AMPERE 4 #else #define MMQ_X_Q8_0_AMPERE 128 #define MMQ_Y_Q8_0_AMPERE 64 #define NWARPS_Q8_0_AMPERE 4 #endif #define MMQ_X_Q8_0_PASCAL 64 #define MMQ_Y_Q8_0_PASCAL 64 #define NWARPS_Q8_0_PASCAL 8 #define MMQ_X_Q2_K_RDNA2 64 #define MMQ_Y_Q2_K_RDNA2 128 #define NWARPS_Q2_K_RDNA2 8 #define MMQ_X_Q2_K_RDNA1 128 #define MMQ_Y_Q2_K_RDNA1 32 #define NWARPS_Q2_K_RDNA1 8 #if defined(CUDA_USE_TENSOR_CORES) #define MMQ_X_Q2_K_AMPERE 4 #define MMQ_Y_Q2_K_AMPERE 32 #define NWARPS_Q2_K_AMPERE 4 #else #define MMQ_X_Q2_K_AMPERE 64 #define MMQ_Y_Q2_K_AMPERE 128 #define NWARPS_Q2_K_AMPERE 4 #endif #define MMQ_X_Q2_K_PASCAL 64 #define MMQ_Y_Q2_K_PASCAL 64 #define NWARPS_Q2_K_PASCAL 8 #define MMQ_X_Q3_K_RDNA2 128 #define MMQ_Y_Q3_K_RDNA2 64 #define NWARPS_Q3_K_RDNA2 8 #define MMQ_X_Q3_K_RDNA1 32 #define MMQ_Y_Q3_K_RDNA1 128 #define NWARPS_Q3_K_RDNA1 8 #if defined(CUDA_USE_TENSOR_CORES) #define MMQ_X_Q3_K_AMPERE 4 #define MMQ_Y_Q3_K_AMPERE 32 #define NWARPS_Q3_K_AMPERE 4 #else #define 
MMQ_X_Q3_K_AMPERE 128 #define MMQ_Y_Q3_K_AMPERE 128 #define NWARPS_Q3_K_AMPERE 4 #endif #define MMQ_X_Q3_K_PASCAL 64 #define MMQ_Y_Q3_K_PASCAL 64 #define NWARPS_Q3_K_PASCAL 8 #define MMQ_X_Q4_K_RDNA2 64 #define MMQ_Y_Q4_K_RDNA2 128 #define NWARPS_Q4_K_RDNA2 8 #define MMQ_X_Q4_K_RDNA1 32 #define MMQ_Y_Q4_K_RDNA1 64 #define NWARPS_Q4_K_RDNA1 8 #if defined(CUDA_USE_TENSOR_CORES) #define MMQ_X_Q4_K_AMPERE 4 #define MMQ_Y_Q4_K_AMPERE 32 #define NWARPS_Q4_K_AMPERE 4 #else #define MMQ_X_Q4_K_AMPERE 64 #define MMQ_Y_Q4_K_AMPERE 128 #define NWARPS_Q4_K_AMPERE 4 #endif #define MMQ_X_Q4_K_PASCAL 64 #define MMQ_Y_Q4_K_PASCAL 64 #define NWARPS_Q4_K_PASCAL 8 #define MMQ_X_Q5_K_RDNA2 64 #define MMQ_Y_Q5_K_RDNA2 128 #define NWARPS_Q5_K_RDNA2 8 #define MMQ_X_Q5_K_RDNA1 32 #define MMQ_Y_Q5_K_RDNA1 64 #define NWARPS_Q5_K_RDNA1 8 #if defined(CUDA_USE_TENSOR_CORES) #define MMQ_X_Q5_K_AMPERE 4 #define MMQ_Y_Q5_K_AMPERE 32 #define NWARPS_Q5_K_AMPERE 4 #else #define MMQ_X_Q5_K_AMPERE 64 #define MMQ_Y_Q5_K_AMPERE 128 #define NWARPS_Q5_K_AMPERE 4 #endif #define MMQ_X_Q5_K_PASCAL 64 #define MMQ_Y_Q5_K_PASCAL 64 #define NWARPS_Q5_K_PASCAL 8 #define MMQ_X_Q6_K_RDNA2 64 #define MMQ_Y_Q6_K_RDNA2 128 #define NWARPS_Q6_K_RDNA2 8 #define MMQ_X_Q6_K_RDNA1 32 #define MMQ_Y_Q6_K_RDNA1 64 #define NWARPS_Q6_K_RDNA1 8 #if defined(CUDA_USE_TENSOR_CORES) #define MMQ_X_Q6_K_AMPERE 4 #define MMQ_Y_Q6_K_AMPERE 32 #define NWARPS_Q6_K_AMPERE 4 #else #define MMQ_X_Q6_K_AMPERE 64 #define MMQ_Y_Q6_K_AMPERE 64 #define NWARPS_Q6_K_AMPERE 4 #endif #define MMQ_X_Q6_K_PASCAL 64 #define MMQ_Y_Q6_K_PASCAL 64 #define NWARPS_Q6_K_PASCAL 8 // QK = number of values after dequantization // QR = QK / number of values before dequantization // QI = number of 32 bit integers before dequantization #define QK4_0 32 #define QR4_0 2 #define QI4_0 (QK4_0 / (4 * QR4_0)) typedef struct { half d; // delta uint8_t qs[QK4_0 / 2]; // nibbles / quants } block_q4_0; static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding"); #define QK4_1 32 #define QR4_1 2 #define QI4_1 (QK4_1 / (4 * QR4_1)) typedef struct { half2 dm; // dm.x = delta, dm.y = min uint8_t qs[QK4_1 / 2]; // nibbles / quants } block_q4_1; static_assert(sizeof(block_q4_1) == sizeof(ggml_fp16_t) * 2 + QK4_1 / 2, "wrong q4_1 block size/padding"); #define QK5_0 32 #define QR5_0 2 #define QI5_0 (QK5_0 / (4 * QR5_0)) typedef struct { half d; // delta uint8_t qh[4]; // 5-th bit of quants uint8_t qs[QK5_0 / 2]; // nibbles / quants } block_q5_0; static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding"); #define QK5_1 32 #define QR5_1 2 #define QI5_1 (QK5_1 / (4 * QR5_1)) typedef struct { half2 dm; // dm.x = delta, dm.y = min uint8_t qh[4]; // 5-th bit of quants uint8_t qs[QK5_1 / 2]; // nibbles / quants } block_q5_1; static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding"); #define QK8_0 32 #define QR8_0 1 #define QI8_0 (QK8_0 / (4 * QR8_0)) typedef struct { half d; // delta int8_t qs[QK8_0]; // quants } block_q8_0; static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding"); #define QK8_1 32 #define QR8_1 1 #define QI8_1 (QK8_1 / (4 * QR8_1)) typedef struct { half2 ds; // ds.x = delta, ds.y = sum int8_t qs[QK8_0]; // quants } block_q8_1; static_assert(sizeof(block_q8_1) == 2*sizeof(ggml_fp16_t) + QK8_0, "wrong q8_1 block size/padding"); typedef float (*vec_dot_q_cuda_t)(const void * __restrict__ vbq, 
const block_q8_1 * __restrict__ bq8_1, const int & iqs); typedef void (*allocate_tiles_cuda_t)(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc); typedef void (*load_tiles_cuda_t)( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row); typedef float (*vec_dot_q_mul_mat_cuda_t)( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ms, const int & i, const int & j, const int & k); #define QR2_K 4 #define QI2_K (QK_K / (4*QR2_K)) typedef struct { uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits uint8_t qs[QK_K/4]; // quants half2 dm; // super-block scale for quantized scales/mins } block_q2_K; static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_fp16_t) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding"); #define QR3_K 4 #define QI3_K (QK_K / (4*QR3_K)) typedef struct { uint8_t hmask[QK_K/8]; // quants - high bit uint8_t qs[QK_K/4]; // quants - low 2 bits #ifdef GGML_QKK_64 uint8_t scales[2]; // scales, quantized with 8 bits #else uint8_t scales[K_SCALE_SIZE]; // scales, quantized with 6 bits #endif half d; // super-block scale } block_q3_K; //static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + K_SCALE_SIZE, "wrong q3_K block size/padding"); #define QR4_K 2 #define QI4_K (QK_K / (4*QR4_K)) #ifdef GGML_QKK_64 typedef struct { half dm[2]; // super-block scales/mins uint8_t scales[2]; // 4-bit block scales/mins uint8_t qs[QK_K/2]; // 4--bit quants } block_q4_K; static_assert(sizeof(block_q4_K) == sizeof(half2) + QK_K/2 + 2, "wrong q4_K block size/padding"); #else typedef struct { half2 dm; // super-block scale for quantized scales/mins uint8_t scales[3*QK_K/64]; // scales, quantized with 6 bits uint8_t qs[QK_K/2]; // 4--bit quants } block_q4_K; static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + 3*QK_K/64 + QK_K/2, "wrong q4_K block size/padding"); #endif #define QR5_K 2 #define QI5_K (QK_K / (4*QR5_K)) #ifdef GGML_QKK_64 typedef struct { half d; // super-block scale int8_t scales[QK_K/16]; // block scales uint8_t qh[QK_K/8]; // quants, high bit uint8_t qs[QK_K/2]; // quants, low 4 bits } block_q5_K; static_assert(sizeof(block_q5_K) == sizeof(ggml_fp16_t) + QK_K/2 + QK_K/8 + QK_K/16, "wrong q5_K block size/padding"); #else typedef struct { half2 dm; // super-block scale for quantized scales/mins uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits uint8_t qh[QK_K/8]; // quants, high bit uint8_t qs[QK_K/2]; // quants, low 4 bits } block_q5_K; static_assert(sizeof(block_q5_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2 + QK_K/8, "wrong q5_K block size/padding"); #endif #define QR6_K 2 #define QI6_K (QK_K / (4*QR6_K)) typedef struct { uint8_t ql[QK_K/2]; // quants, lower 4 bits uint8_t qh[QK_K/4]; // quants, upper 2 bits int8_t scales[QK_K/16]; // scales half d; // delta } block_q6_K; static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + 13*QK_K/16, "wrong q6_K block size/padding"); // In llama.cpp this is only used for intermediate quantization and dot products typedef struct { float d; // delta int8_t qs[QK_K]; // quants int16_t bsums[QK_K/16]; // sum of quants in groups of 16 } block_q8_K; static_assert(sizeof(block_q8_K) == sizeof(float) + QK_K + QK_K/16*sizeof(int16_t), "wrong q8_K block size/padding"); template <int qk, 
int qr, int qi, bool need_sum, typename block_q_t, int mmq_x, int mmq_y, int nwarps, allocate_tiles_cuda_t allocate_tiles, load_tiles_cuda_t load_tiles, int vdr, vec_dot_q_mul_mat_cuda_t vec_dot> static __device__ __forceinline__ void mul_mat_q( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const block_q_t * x = (const block_q_t *) vx; const block_q8_1 * y = (const block_q8_1 *) vy; const int blocks_per_row_x = ncols_x / qk; const int blocks_per_col_y = nrows_y / QK8_1; const int blocks_per_warp = WARP_SIZE / qi; const int & ncols_dst = ncols_y; const int row_dst_0 = blockIdx.x*mmq_y; const int & row_x_0 = row_dst_0; const int col_dst_0 = blockIdx.y*mmq_x; const int & col_y_0 = col_dst_0; int * tile_x_ql = nullptr; half2 * tile_x_dm = nullptr; int * tile_x_qh = nullptr; int * tile_x_sc = nullptr; allocate_tiles(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc); __shared__ int tile_y_qs[mmq_x * WARP_SIZE]; __shared__ half2 tile_y_ds[mmq_x * WARP_SIZE/QI8_1]; float sum[mmq_y/WARP_SIZE][mmq_x/nwarps] = {{0.0f}}; for (int ib0 = 0; ib0 < blocks_per_row_x; ib0 += blocks_per_warp) { load_tiles(x + row_x_0*blocks_per_row_x + ib0, tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc, threadIdx.y, nrows_x-row_x_0-1, threadIdx.x, blocks_per_row_x); #pragma unroll for (int ir = 0; ir < qr; ++ir) { const int kqs = ir*WARP_SIZE + threadIdx.x; const int kbxd = kqs / QI8_1; #pragma unroll for (int i = 0; i < mmq_x; i += nwarps) { const int col_y_eff = min(col_y_0 + threadIdx.y + i, ncols_y-1); // to prevent out-of-bounds memory accesses const block_q8_1 * by0 = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + kbxd]; const int index_y = (threadIdx.y + i) * WARP_SIZE + kqs % WARP_SIZE; tile_y_qs[index_y] = get_int_from_int8_aligned(by0->qs, threadIdx.x % QI8_1); } #pragma unroll for (int ids0 = 0; ids0 < mmq_x; ids0 += nwarps * QI8_1) { const int ids = (ids0 + threadIdx.y * QI8_1 + threadIdx.x / (WARP_SIZE/QI8_1)) % mmq_x; const int kby = threadIdx.x % (WARP_SIZE/QI8_1); const int col_y_eff = min(col_y_0 + ids, ncols_y-1); // if the sum is not needed it's faster to transform the scale to f32 ahead of time const half2 * dsi_src = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + ir*(WARP_SIZE/QI8_1) + kby].ds; half2 * dsi_dst = &tile_y_ds[ids * (WARP_SIZE/QI8_1) + kby]; if (need_sum) { *dsi_dst = *dsi_src; } else { float * dfi_dst = (float *) dsi_dst; *dfi_dst = __low2half(*dsi_src); } } __syncthreads(); // #pragma unroll // unrolling this loop causes too much register pressure for (int k = ir*WARP_SIZE/qr; k < (ir+1)*WARP_SIZE/qr; k += vdr) { #pragma unroll for (int j = 0; j < mmq_x; j += nwarps) { #pragma unroll for (int i = 0; i < mmq_y; i += WARP_SIZE) { sum[i/WARP_SIZE][j/nwarps] += vec_dot( tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc, tile_y_qs, tile_y_ds, threadIdx.x + i, threadIdx.y + j, k); } } } __syncthreads(); } } #pragma unroll for (int j = 0; j < mmq_x; j += nwarps) { const int col_dst = col_dst_0 + j + threadIdx.y; if (col_dst >= ncols_dst) { return; } #pragma unroll for (int i = 0; i < mmq_y; i += WARP_SIZE) { const int row_dst = row_dst_0 + threadIdx.x + i; if (row_dst >= nrows_dst) { continue; } dst[col_dst*nrows_dst + row_dst] = sum[i/WARP_SIZE][j/nwarps]; } } } template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_0( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * 
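/*
 * load_tiles_q4_0 (parameter list continues below): copies one warp-strip of the x tile
 * into shared memory. i_offset is this warp's row offset inside the tile, i_max clamps
 * the row index when need_check is set so edge tiles do not read past the last row,
 * k is the lane's position along the block row, and blocks_per_row is the row stride of
 * x in blocks. Quants are stored into x_ql with a row stride of WARP_SIZE + 1 (the extra
 * element is the usual padding trick to reduce shared-memory bank conflicts), and the
 * fp16 block scales are widened to float through the x_dm pointer reinterpreted as float.
 */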
__restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { (void)x_qh; (void)x_sc; const int kbx = k / QI4_0; const int kqsx = k % QI4_0; const block_q4_0 * bx0 = (const block_q4_0 *) vx; float * x_dmf = (float *) x_dm; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { int i = i0 + i_offset; if (need_check) { i = min(i, i_max); } const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbx; x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx); // x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbx] = bxi->d; } const int blocks_per_tile_x_row = WARP_SIZE / QI4_0; const int kbxd = k % blocks_per_tile_x_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_0) { int i = i0 + i_offset * QI4_0 + k / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); } const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbxd; x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbxd] = bxi->d; } } template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_1( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); GGML_CUDA_ASSUME(i_offset >= 0); GGML_CUDA_ASSUME(i_offset < nwarps); GGML_CUDA_ASSUME(k >= 0); GGML_CUDA_ASSUME(k < WARP_SIZE); const int kbx = k / QI4_1; const int kqsx = k % QI4_1; const block_q4_1 * bx0 = (const block_q4_1 *) vx; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { int i = i0 + i_offset; if (need_check) { i = min(i, i_max); } const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbx; x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); } const int blocks_per_tile_x_row = WARP_SIZE / QI4_1; const int kbxd = k % blocks_per_tile_x_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_1) { int i = i0 + i_offset * QI4_1 + k / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); } const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbxd; x_dm[i * (WARP_SIZE/QI4_1) + i / QI4_1 + kbxd] = bxi->dm; } } template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { (void)x_qh; (void)x_sc; __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y]; __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI4_0) + mmq_y/QI4_0]; *x_ql = tile_x_qs; *x_dm = (half2 *) tile_x_d; } template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + + mmq_y]; __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI4_1) + mmq_y/QI4_1]; *x_ql = tile_x_qs; *x_dm = tile_x_dm; } static __device__ __forceinline__ void dequantize_q4_0(const void * vx, const int ib, const int iqs, dfloat2 & v){ const block_q4_0 * x = (const block_q4_0 *) vx; const dfloat d = x[ib].d; const int vui = x[ib].qs[iqs]; v.x = vui & 0xF; v.y = vui >> 4; #ifdef GGML_CUDA_F16 v = __hsub2(v, {8.0f, 8.0f}); v = __hmul2(v, {d, d}); #else v.x = (v.x - 8.0f) * d; v.y = (v.y - 8.0f) * d; #endif // GGML_CUDA_F16 } static __device__ __forceinline__ void dequantize_q4_1(const void * vx, const int ib, const int iqs, dfloat2 & v){ const block_q4_1 * x = (const block_q4_1 *) vx; const dfloat d = __low2half(x[ib].dm); const dfloat m = __high2half(x[ib].dm); const int vui = 
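/*
 * Scalar reference view of the two 4-bit formats handled here, for orientation
 * (j indexes the 16 bytes of a 32-value block):
 *   q4_0: w[j] = d * ((qs[j] & 0xF) - 8),   w[j+16] = d * ((qs[j] >> 4) - 8)
 *   q4_1: w[j] = d * (qs[j] & 0xF) + m,     w[j+16] = d * (qs[j] >> 4) + m
 * q4_0 stores only the fp16 scale d; q4_1 packs (d, m) into the half2 dm that was just
 * unpacked with __low2half/__high2half.
 */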
x[ib].qs[iqs]; v.x = vui & 0xF; v.y = vui >> 4; #ifdef GGML_CUDA_F16 v = __hmul2(v, {d, d}); v = __hadd2(v, {m, m}); #else v.x = (v.x * d) + m; v.y = (v.y * d) + m; #endif // GGML_CUDA_F16 } static __device__ __forceinline__ void dequantize_q5_0(const void * vx, const int ib, const int iqs, dfloat2 & v){ const block_q5_0 * x = (const block_q5_0 *) vx; const dfloat d = x[ib].d; uint32_t qh; memcpy(&qh, x[ib].qh, sizeof(qh)); const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10; const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10; v.x = ((x[ib].qs[iqs] & 0xf) | xh_0); v.y = ((x[ib].qs[iqs] >> 4) | xh_1); #ifdef GGML_CUDA_F16 v = __hsub2(v, {16.0f, 16.0f}); v = __hmul2(v, {d, d}); #else v.x = (v.x - 16.0f) * d; v.y = (v.y - 16.0f) * d; #endif // GGML_CUDA_F16 } static __device__ __forceinline__ void dequantize_q5_1(const void * vx, const int ib, const int iqs, dfloat2 & v){ const block_q5_1 * x = (const block_q5_1 *) vx; const dfloat d = __low2half(x[ib].dm); const dfloat m = __high2half(x[ib].dm); uint32_t qh; memcpy(&qh, x[ib].qh, sizeof(qh)); const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10; const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10; v.x = ((x[ib].qs[iqs] & 0xf) | xh_0); v.y = ((x[ib].qs[iqs] >> 4) | xh_1); #ifdef GGML_CUDA_F16 v = __hmul2(v, {d, d}); v = __hadd2(v, {m, m}); #else v.x = (v.x * d) + m; v.y = (v.y * d) + m; #endif // GGML_CUDA_F16 } static __device__ __forceinline__ void dequantize_q8_0(const void * vx, const int ib, const int iqs, dfloat2 & v){ const block_q8_0 * x = (const block_q8_0 *) vx; const dfloat d = x[ib].d; v.x = x[ib].qs[iqs + 0]; v.y = x[ib].qs[iqs + 1]; #ifdef GGML_CUDA_F16 v = __hmul2(v, {d, d}); #else v.x *= d; v.y *= d; #endif // GGML_CUDA_F16 } template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t> static __device__ void dequantize_block(const void * __restrict__ vx, dst_t * __restrict__ y, const int k) { const int i = 2*(blockDim.x*blockIdx.x + threadIdx.x); if (i >= k) { return; } const int ib = i/qk; // block index const int iqs = (i%qk)/qr; // quant index const int iybs = i - i%qk; // y block start index const int y_offset = qr == 1 ? 
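/*
 * dequantize_block: each thread dequantizes one value pair. For qr == 2 formats (the
 * 4- and 5-bit blocks) the two nibbles of a byte belong to elements qk/2 apart, so the
 * second value lands at y[... + qk/2]; for q8_0 (qr == 1) the two values are adjacent
 * and y_offset is 1.
 */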
1 : qk/2; // dequantize dfloat2 v; dequantize_kernel(vx, ib, iqs, v); y[iybs + iqs + 0] = v.x; y[iybs + iqs + y_offset] = v.y; } template<typename dst_t> static __device__ void dequantize_block_q4_0(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) { const int64_t i = blockIdx.x; // assume 32 threads const int tid = threadIdx.x; const int il = tid/8; const int ir = tid%8; const int64_t ib = 8*i + ir; if (ib >= nb32) { return; } dst_t * y = yy + 256*i + 32*ir + 4*il; const block_q4_0 * x = (const block_q4_0 *)vx + ib; const float d = __half2float(x->d); const float dm = -8*d; const uint8_t * q = x->qs + 4*il; for (int l = 0; l < 4; ++l) { y[l+ 0] = d * (q[l] & 0xF) + dm; y[l+16] = d * (q[l] >> 4) + dm; } } template<typename dst_t> static __device__ void dequantize_block_q4_1(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) { const int64_t i = blockIdx.x; // assume 32 threads const int tid = threadIdx.x; const int il = tid/8; const int ir = tid%8; const int64_t ib = 8*i + ir; if (ib >= nb32) { return; } dst_t * y = yy + 256*i + 32*ir + 4*il; const block_q4_1 * x = (const block_q4_1 *)vx + ib; const float2 d = __half22float2(x->dm); const uint8_t * q = x->qs + 4*il; for (int l = 0; l < 4; ++l) { y[l+ 0] = d.x * (q[l] & 0xF) + d.y; y[l+16] = d.x * (q[l] >> 4) + d.y; } } //================================== k-quants template<typename dst_t> static __device__ void dequantize_block_q2_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { const int i = blockIdx.x; const block_q2_K * x = (const block_q2_K *) vx; const int tid = threadIdx.x; #if QK_K == 256 const int n = tid/32; const int l = tid - 32*n; const int is = 8*n + l/16; const uint8_t q = x[i].qs[32*n + l]; dst_t * y = yy + i*QK_K + 128*n; float dall = __low2half(x[i].dm); float dmin = __high2half(x[i].dm); y[l+ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4); y[l+32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 2) & 3) - dmin * (x[i].scales[is+2] >> 4); y[l+64] = dall * (x[i].scales[is+4] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+4] >> 4); y[l+96] = dall * (x[i].scales[is+6] & 0xF) * ((q >> 6) & 3) - dmin * (x[i].scales[is+6] >> 4); #else const int is = tid/16; // 0 or 1 const int il = tid%16; // 0...15 const uint8_t q = x[i].qs[il] >> (2*is); dst_t * y = yy + i*QK_K + 16*is + il; float dall = __low2half(x[i].dm); float dmin = __high2half(x[i].dm); y[ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4); y[32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+2] >> 4); #endif } template<typename dst_t> static __device__ void dequantize_block_q3_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { const int i = blockIdx.x; const block_q3_K * x = (const block_q3_K *) vx; #if QK_K == 256 const int r = threadIdx.x/4; const int tid = r/2; const int is0 = r%2; const int l0 = 16*is0 + 4*(threadIdx.x%4); const int n = tid / 4; const int j = tid - 4*n; uint8_t m = 1 << (4*n + j); int is = 8*n + 2*j + is0; int shift = 2*j; int8_t us = is < 4 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+8] >> 0) & 3) << 4) : is < 8 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+4] >> 2) & 3) << 4) : is < 12 ? 
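/*
 * q3_K scale unpacking: the 12-byte scales field packs 16 six-bit sub-block scales; the
 * low 4 bits live in scales[0..7] (low or high nibble depending on the sub-block) and
 * the top 2 bits in scales[8..11]. The ternary chain here reassembles scale number "is",
 * and the result is used as (us - 32), i.e. the scales are effectively signed.
 */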
(x[i].scales[is-8] >> 4) | (((x[i].scales[is+0] >> 4) & 3) << 4) : (x[i].scales[is-8] >> 4) | (((x[i].scales[is-4] >> 6) & 3) << 4); float d_all = x[i].d; float dl = d_all * (us - 32); dst_t * y = yy + i*QK_K + 128*n + 32*j; const uint8_t * q = x[i].qs + 32*n; const uint8_t * hm = x[i].hmask; for (int l = l0; l < l0+4; ++l) y[l] = dl * ((int8_t)((q[l] >> shift) & 3) - ((hm[l] & m) ? 0 : 4)); #else const int tid = threadIdx.x; const int is = tid/16; // 0 or 1 const int il = tid%16; // 0...15 const int im = il/8; // 0...1 const int in = il%8; // 0...7 dst_t * y = yy + i*QK_K + 16*is + il; const uint8_t q = x[i].qs[il] >> (2*is); const uint8_t h = x[i].hmask[in] >> (2*is + im); const float d = (float)x[i].d; if (is == 0) { y[ 0] = d * ((x[i].scales[0] & 0xF) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4)); y[32] = d * ((x[i].scales[1] & 0xF) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 0 : 4)); } else { y[ 0] = d * ((x[i].scales[0] >> 4) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4)); y[32] = d * ((x[i].scales[1] >> 4) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 0 : 4)); } #endif } #if QK_K == 256 static inline __device__ void get_scale_min_k4(int j, const uint8_t * q, uint8_t & d, uint8_t & m) { if (j < 4) { d = q[j] & 63; m = q[j + 4] & 63; } else { d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4); m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4); } } #endif template<typename dst_t> static __device__ void dequantize_block_q4_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { const block_q4_K * x = (const block_q4_K *) vx; const int i = blockIdx.x; #if QK_K == 256 // assume 32 threads const int tid = threadIdx.x; const int il = tid/8; const int ir = tid%8; const int is = 2*il; const int n = 4; dst_t * y = yy + i*QK_K + 64*il + n*ir; const float dall = __low2half(x[i].dm); const float dmin = __high2half(x[i].dm); const uint8_t * q = x[i].qs + 32*il + n*ir; uint8_t sc, m; get_scale_min_k4(is + 0, x[i].scales, sc, m); const float d1 = dall * sc; const float m1 = dmin * m; get_scale_min_k4(is + 1, x[i].scales, sc, m); const float d2 = dall * sc; const float m2 = dmin * m; for (int l = 0; l < n; ++l) { y[l + 0] = d1 * (q[l] & 0xF) - m1; y[l +32] = d2 * (q[l] >> 4) - m2; } #else const int tid = threadIdx.x; const uint8_t * q = x[i].qs; dst_t * y = yy + i*QK_K; const float d = (float)x[i].dm[0]; const float m = (float)x[i].dm[1]; y[tid+ 0] = d * (x[i].scales[0] & 0xF) * (q[tid] & 0xF) - m * (x[i].scales[0] >> 4); y[tid+32] = d * (x[i].scales[1] & 0xF) * (q[tid] >> 4) - m * (x[i].scales[1] >> 4); #endif } template<typename dst_t> static __device__ void dequantize_block_q5_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { const block_q5_K * x = (const block_q5_K *) vx; const int i = blockIdx.x; #if QK_K == 256 // assume 64 threads - this is very slightly better than the one below const int tid = threadIdx.x; const int il = tid/16; // il is in 0...3 const int ir = tid%16; // ir is in 0...15 const int is = 2*il; // is is in 0...6 dst_t * y = yy + i*QK_K + 64*il + 2*ir; const float dall = __low2half(x[i].dm); const float dmin = __high2half(x[i].dm); const uint8_t * ql = x[i].qs + 32*il + 2*ir; const uint8_t * qh = x[i].qh + 2*ir; uint8_t sc, m; get_scale_min_k4(is + 0, x[i].scales, sc, m); const float d1 = dall * sc; const float m1 = dmin * m; get_scale_min_k4(is + 1, x[i].scales, sc, m); const float d2 = dall * sc; const float m2 = dmin * m; uint8_t hm = 1 << (2*il); y[ 0] = d1 * ((ql[ 0] & 0xF) + (qh[ 0] & hm ? 16 : 0)) - m1; y[ 1] = d1 * ((ql[ 1] & 0xF) + (qh[ 1] & hm ? 
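/*
 * q4_K / q5_K dequantization: get_scale_min_k4 unpacks the 6-bit (scale, min) pair of a
 * 32-value group, and each value is d * sc * q - dmin * m. q5_K (this kernel)
 * additionally restores the 5th quant bit from qh under the per-group mask hm, adding
 * 16 to the 4-bit value when the bit is set.
 */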
16 : 0)) - m1; hm <<= 1; y[32] = d2 * ((ql[ 0] >> 4) + (qh[ 0] & hm ? 16 : 0)) - m2; y[33] = d2 * ((ql[ 1] >> 4) + (qh[ 1] & hm ? 16 : 0)) - m2; #else const int tid = threadIdx.x; const uint8_t q = x[i].qs[tid]; const int im = tid/8; // 0...3 const int in = tid%8; // 0...7 const int is = tid/16; // 0 or 1 const uint8_t h = x[i].qh[in] >> im; const float d = x[i].d; dst_t * y = yy + i*QK_K + tid; y[ 0] = d * x[i].scales[is+0] * ((q & 0xF) - ((h >> 0) & 1 ? 0 : 16)); y[32] = d * x[i].scales[is+2] * ((q >> 4) - ((h >> 4) & 1 ? 0 : 16)); #endif } template<typename dst_t> static __device__ void dequantize_block_q6_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { const block_q6_K * x = (const block_q6_K *) vx; const int64_t i = blockIdx.x; #if QK_K == 256 // assume 64 threads - this is very slightly better than the one below const int64_t tid = threadIdx.x; const int64_t ip = tid/32; // ip is 0 or 1 const int64_t il = tid - 32*ip; // 0...32 const int64_t is = 8*ip + il/16; dst_t * y = yy + i*QK_K + 128*ip + il; const float d = x[i].d; const uint8_t * ql = x[i].ql + 64*ip + il; const uint8_t qh = x[i].qh[32*ip + il]; const int8_t * sc = x[i].scales + is; y[ 0] = d * sc[0] * ((int8_t)((ql[ 0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32); y[32] = d * sc[2] * ((int8_t)((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32); y[64] = d * sc[4] * ((int8_t)((ql[ 0] >> 4) | (((qh >> 4) & 3) << 4)) - 32); y[96] = d * sc[6] * ((int8_t)((ql[32] >> 4) | (((qh >> 6) & 3) << 4)) - 32); #else // assume 32 threads const int64_t tid = threadIdx.x; const int64_t ip = tid/16; // 0 or 1 const int64_t il = tid - 16*ip; // 0...15 dst_t * y = yy + i*QK_K + 16*ip + il; const float d = x[i].d; const uint8_t ql = x[i].ql[16*ip + il]; const uint8_t qh = x[i].qh[il] >> (2*ip); const int8_t * sc = x[i].scales; y[ 0] = d * sc[ip+0] * ((int8_t)((ql & 0xF) | (((qh >> 0) & 3) << 4)) - 32); y[32] = d * sc[ip+2] * ((int8_t)((ql >> 4) | (((qh >> 4) & 3) << 4)) - 32); #endif } template<typename dst_t> static __device__ void dequantize_block_q8_0(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) { const int i = blockIdx.x; // assume 32 threads const int tid = threadIdx.x; const int il = tid/8; const int ir = tid%8; const int ib = 8*i + ir; if (ib >= nb32) { return; } dst_t * y = yy + 256*i + 32*ir + 8*il; const block_q8_0 * x = (const block_q8_0 *)vx + ib; const float d = __half2float(x->d); const int8_t * q = x->qs + 8*il; for (int l = 0; l < 8; ++l) { y[l] = d * q[l]; } } template<typename dst_t> static __device__ void dequantize_block_q8_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { const block_q8_K * x = (const block_q8_K *) vx; const int i = blockIdx.x; #if QK_K == 256 // assume 32 threads const int tid = threadIdx.x; const int il = tid/8; const int ir = tid%8; const int n = 8; dst_t * y = yy + i*QK_K + 64*il + n*ir; const int8_t * q = x[i].qs + 64*il + n*ir; for (int l = 0; l < n; ++l) { y[l] = q[l] * x[i].d; } #else const int tid = threadIdx.x; const uint8_t * q = x[i].qs; float * y = yy + i*QK_K; y[tid] = x[i].d * x[i].scales[0]; #endif } template<typename dst_t> static __device__ void dequantize_block_q5_0(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) { return dequantize_block<QK5_0, QR5_0, dequantize_q5_0>(vx, yy, nb32); } template<typename dst_t> static __device__ void dequantize_block_q5_1(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) { return dequantize_block<QK5_1, QR5_1, dequantize_q5_1>(vx, yy, nb32); } #define DEQUANTIZE_K(QNAME) \ extern "C" __global__ 
void dequantize_block_##QNAME##_f32(const void * __restrict__ vx, float * __restrict__ y) { \ dequantize_block_##QNAME(vx, y); \ } \ extern "C" __global__ void dequantize_block_##QNAME##_f16(const void * __restrict__ vx, half * __restrict__ y) { \ dequantize_block_##QNAME(vx, y); \ } \ #define DEQUANTIZE(QNAME) \ extern "C" __global__ void dequantize_block_##QNAME##_f32(const void * __restrict__ vx, float * __restrict__ y, const int k) { \ dequantize_block_##QNAME(vx, y, k); \ } \ extern "C" __global__ void dequantize_block_##QNAME##_f16(const void * __restrict__ vx, half * __restrict__ y, const int k) { \ dequantize_block_##QNAME(vx, y, k); \ } \ DEQUANTIZE_K(q2_K) DEQUANTIZE_K(q3_K) DEQUANTIZE_K(q4_K) DEQUANTIZE_K(q5_K) DEQUANTIZE_K(q6_K) DEQUANTIZE_K(q8_K) DEQUANTIZE(q4_0) DEQUANTIZE(q4_1) DEQUANTIZE(q5_0) DEQUANTIZE(q5_1) DEQUANTIZE(q8_0) template <int qk, int qr, dequantize_kernel_t dequantize_kernel> static __device__ void dequantize_mul_mat_vec(const void * __restrict__ vx, const dfloat * __restrict__ y, float * __restrict__ dst, const int ncols, const int nrows) { // qk = quantized weights per x block // qr = number of quantized weights per data value in x block const int row = blockIdx.x*blockDim.y + threadIdx.y; if (row >= nrows) { return; } const int tid = threadIdx.x; const int iter_stride = 2*GGML_CUDA_DMMV_X; const int vals_per_iter = iter_stride / WARP_SIZE; // num quantized vals per thread and i iter const int y_offset = qr == 1 ? 1 : qk/2; // partial sum for each thread #ifdef GGML_CUDA_F16 half2 tmp = {0.0f, 0.0f}; // two sums for f16 to take advantage of half2 intrinsics #else float tmp = 0.0f; #endif // GGML_CUDA_F16 for (int i = 0; i < ncols; i += iter_stride) { const int col = i + vals_per_iter*tid; const int ib = (row*ncols + col)/qk; // x block index const int iqs = (col%qk)/qr; // x quant index const int iybs = col - col%qk; // y block start index // processing >2 values per i iter is faster for fast GPUs #pragma unroll for (int j = 0; j < vals_per_iter; j += 2) { // process 2 vals per j iter // dequantize // for qr = 2 the iqs needs to increase by 1 per j iter because 2 weights per data val dfloat2 v; dequantize_kernel(vx, ib, iqs + j/qr, v); // matrix multiplication // for qr = 2 the y index needs to increase by 1 per j iter because of y_offset = qk/2 #ifdef GGML_CUDA_F16 tmp += __hmul2(v, { y[iybs + iqs + j/qr + 0], y[iybs + iqs + j/qr + y_offset] }); #else tmp += v.x * y[iybs + iqs + j/qr + 0]; tmp += v.y * y[iybs + iqs + j/qr + y_offset]; #endif // GGML_CUDA_F16 } } // sum up partial sums and write back result #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); } if (tid == 0) { #ifdef GGML_CUDA_F16 dst[row] = tmp.x + tmp.y; #else dst[row] = tmp; #endif // GGML_CUDA_F16 } } extern "C" __global__ void dequantize_mul_mat_vec_q4_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) { dequantize_mul_mat_vec<QK4_0, QR4_0, dequantize_q4_0>(vx, y, dst, ncols, nrows); } extern "C" __global__ void dequantize_mul_mat_vec_q4_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) { dequantize_mul_mat_vec<QK4_1, QR4_1, dequantize_q4_1>(vx, y, dst, ncols, nrows); } extern "C" __global__ void dequantize_mul_mat_vec_q5_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) { dequantize_mul_mat_vec<QK5_0, QR5_0, dequantize_q5_0>(vx, y, dst, ncols, nrows); } extern "C" __global__ void 
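/*
 * dequantize_mul_mat_vec: one dst row per (blockIdx.x, threadIdx.y) pair; the 32 lanes
 * of the warp stride across the row's columns, dequantize value pairs, multiply by y,
 * and the per-lane partial sums are combined with the __shfl_xor_sync butterfly before
 * lane 0 writes dst[row]. A host-side launch could look roughly like the sketch below;
 * the names and tile choices are illustrative assumptions, not definitions from this
 * file, and the host is expected to keep ncols compatible with the iteration stride
 * (2 * GGML_CUDA_DMMV_X):
 *
 *   int rows_per_block = 1;                                   // hypothetical choice
 *   dim3 block(WARP_SIZE, rows_per_block);
 *   dim3 grid((nrows + rows_per_block - 1) / rows_per_block);
 *   dequantize_mul_mat_vec_q4_0_cuda<<<grid, block>>>(vx, y, dst, ncols, nrows);
 */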
dequantize_mul_mat_vec_q5_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) { dequantize_mul_mat_vec<QK5_1, QR5_1, dequantize_q5_1>(vx, y, dst, ncols, nrows); } extern "C" __global__ void dequantize_mul_mat_vec_q8_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) { dequantize_mul_mat_vec<QK8_0, QR8_0, dequantize_q8_0>(vx, y, dst, ncols, nrows); } extern "C" __global__ void dequantize_mul_mat_vec_q2_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) { static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION"); const int row = blockIdx.x*blockDim.y + threadIdx.y; if (row > nrows) return; const int num_blocks_per_row = ncols / QK_K; const int ib0 = row*num_blocks_per_row; const block_q2_K * x = (const block_q2_K *)vx + ib0; float tmp = 0; // partial sum for thread in warp #if QK_K == 256 const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...15 const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0,1 const int step = 16/K_QUANTS_PER_ITERATION; const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128... const int in = tid - step*im; // 0...15 or 0...7 const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15 or 0...14 in steps of 2 const int q_offset = 32*im + l0; const int s_offset = 8*im; const int y_offset = 128*im + l0; uint32_t aux[4]; const uint8_t * d = (const uint8_t *)aux; const uint8_t * m = (const uint8_t *)(aux + 2); for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + y_offset; const uint8_t * q = x[i].qs + q_offset; const float dall = __low2half(x[i].dm); const float dmin = __high2half(x[i].dm); const uint32_t * a = (const uint32_t *)(x[i].scales + s_offset); aux[0] = a[0] & 0x0f0f0f0f; aux[1] = a[1] & 0x0f0f0f0f; aux[2] = (a[0] >> 4) & 0x0f0f0f0f; aux[3] = (a[1] >> 4) & 0x0f0f0f0f; float sum1 = 0, sum2 = 0; for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) { sum1 += y[l+ 0] * d[0] * ((q[l+ 0] >> 0) & 3) + y[l+32] * d[2] * ((q[l+ 0] >> 2) & 3) + y[l+64] * d[4] * ((q[l+ 0] >> 4) & 3) + y[l+96] * d[6] * ((q[l+ 0] >> 6) & 3) + y[l+16] * d[1] * ((q[l+16] >> 0) & 3) + y[l+48] * d[3] * ((q[l+16] >> 2) & 3) + y[l+80] * d[5] * ((q[l+16] >> 4) & 3) +y[l+112] * d[7] * ((q[l+16] >> 6) & 3); sum2 += y[l+ 0] * m[0] + y[l+32] * m[2] + y[l+64] * m[4] + y[ l+96] * m[6] + y[l+16] * m[1] + y[l+48] * m[3] + y[l+80] * m[5] + y[l+112] * m[7]; } tmp += dall * sum1 - dmin * sum2; } #else const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 or 0...7 const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0....1 or 0...3 const int offset = tid * K_QUANTS_PER_ITERATION; uint32_t uaux[2]; const uint8_t * d = (const uint8_t *)uaux; for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + offset; const uint8_t * q = x[i].qs + offset; const uint32_t * s = (const uint32_t *)x[i].scales; uaux[0] = s[0] & 0x0f0f0f0f; uaux[1] = (s[0] >> 4) & 0x0f0f0f0f; const float2 dall = __half22float2(x[i].dm); float sum1 = 0, sum2 = 0; for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) { const uint8_t ql = q[l]; sum1 += y[l+ 0] * d[0] * ((ql >> 0) & 3) + y[l+16] * d[1] * ((ql >> 2) & 3) + y[l+32] * d[2] * ((ql >> 4) & 3) + y[l+48] * d[3] * ((ql >> 6) & 3); sum2 += y[l+0] * d[4] + y[l+16] * d[5] + y[l+32] * d[6] + y[l+48] * d[7]; } tmp += dall.x * sum1 - dall.y * sum2; } #endif // sum up partial sums and write 
back result #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); } if (threadIdx.x == 0) { dst[row] = tmp; } } extern "C" __global__ void dequantize_mul_mat_vec_q3_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) { const int row = blockIdx.x*blockDim.y + threadIdx.y; if (row > nrows) return; const int num_blocks_per_row = ncols / QK_K; const int ib0 = row*num_blocks_per_row; const block_q3_K * x = (const block_q3_K *)vx + ib0; float tmp = 0; // partial sum for thread in warp #if QK_K == 256 const uint16_t kmask1 = 0x0303; const uint16_t kmask2 = 0x0f0f; const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16 const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0,1 const int n = K_QUANTS_PER_ITERATION; // iterations in the inner loop const int step = 16/K_QUANTS_PER_ITERATION; const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128... const int in = tid - step*im; // 0....15 or 0...7 const uint8_t m = 1 << (4*im); const int l0 = n*in; // 0...15 or 0...14 in steps of 2 const int q_offset = 32*im + l0; const int y_offset = 128*im + l0; uint16_t utmp[4]; const int8_t * s = (const int8_t *)utmp; const uint16_t s_shift = 4*im; for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + y_offset; const uint8_t * q = x[i].qs + q_offset; const uint8_t * h = x[i].hmask + l0; const uint16_t * a = (const uint16_t *)x[i].scales; utmp[0] = ((a[0] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 0)) & kmask1) << 4); utmp[1] = ((a[1] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 0)) & kmask1) << 4); utmp[2] = ((a[2] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 2)) & kmask1) << 4); utmp[3] = ((a[3] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 2)) & kmask1) << 4); const float d = x[i].d; float sum = 0; for (int l = 0; l < n; ++l) { sum += y[l+ 0] * (s[0] - 32) * (((q[l] >> 0) & 3) - (h[l] & (m << 0) ? 0 : 4)) + y[l+32] * (s[2] - 32) * (((q[l] >> 2) & 3) - (h[l] & (m << 1) ? 0 : 4)) + y[l+64] * (s[4] - 32) * (((q[l] >> 4) & 3) - (h[l] & (m << 2) ? 0 : 4)) + y[l+96] * (s[6] - 32) * (((q[l] >> 6) & 3) - (h[l] & (m << 3) ? 0 : 4)); sum += y[l+16] * (s[1] - 32) * (((q[l+16] >> 0) & 3) - (h[l+16] & (m << 0) ? 0 : 4)) + y[l+48] * (s[3] - 32) * (((q[l+16] >> 2) & 3) - (h[l+16] & (m << 1) ? 0 : 4)) + y[l+80] * (s[5] - 32) * (((q[l+16] >> 4) & 3) - (h[l+16] & (m << 2) ? 0 : 4)) + y[l+112] * (s[7] - 32) * (((q[l+16] >> 6) & 3) - (h[l+16] & (m << 3) ? 0 : 4)); } tmp += d * sum; } #else const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 or 0...7 const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0....1 or 0...3 const int offset = tid * K_QUANTS_PER_ITERATION; // 0...15 or 0...14 const int in = offset/8; // 0 or 1 const int im = offset%8; // 0...7 for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + offset; const uint8_t * q = x[i].qs + offset; const uint8_t * s = x[i].scales; const float dall = (float)x[i].d; float sum = 0; for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) { const uint8_t hl = x[i].hmask[im+l] >> in; const uint8_t ql = q[l]; sum += y[l+ 0] * dall * ((s[0] & 0xF) - 8) * ((int8_t)((ql >> 0) & 3) - ((hl >> 0) & 1 ? 0 : 4)) + y[l+16] * dall * ((s[0] >> 4) - 8) * ((int8_t)((ql >> 2) & 3) - ((hl >> 2) & 1 ? 0 : 4)) + y[l+32] * dall * ((s[1] & 0xF) - 8) * ((int8_t)((ql >> 4) & 3) - ((hl >> 4) & 1 ? 
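/*
 * In the dequantize_mul_mat_vec_q*_k kernels, K_QUANTS_PER_ITERATION (1 or 2) controls
 * how many quant values each lane handles per pass over a super-block: tid is the lane's
 * position inside the super-block and ix selects which super-block of the row the lane
 * starts on, after which the lanes stride over the row's super-blocks in the i loop.
 */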
0 : 4)) + y[l+48] * dall * ((s[1] >> 4) - 8) * ((int8_t)((ql >> 6) & 3) - ((hl >> 6) & 1 ? 0 : 4)); } tmp += sum; } #endif // sum up partial sums and write back result #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); } if (threadIdx.x == 0) { dst[row] = tmp; } } extern "C" __global__ void dequantize_mul_mat_vec_q4_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) { const int row = blockIdx.x*blockDim.y + threadIdx.y; if (row > nrows) return; const int num_blocks_per_row = ncols / QK_K; const int ib0 = row*num_blocks_per_row; const block_q4_K * x = (const block_q4_K *)vx + ib0; #if QK_K == 256 const uint16_t kmask1 = 0x3f3f; const uint16_t kmask2 = 0x0f0f; const uint16_t kmask3 = 0xc0c0; const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16 const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0,1 const int step = 8/K_QUANTS_PER_ITERATION; // 8 or 4 const int il = tid/step; // 0...3 const int ir = tid - step*il; // 0...7 or 0...3 const int n = 2 * K_QUANTS_PER_ITERATION; // 2 or 4 const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224 const int in = il%2; const int l0 = n*(2*ir + in); const int q_offset = 32*im + l0; const int y_offset = 64*im + l0; uint16_t aux[4]; const uint8_t * sc = (const uint8_t *)aux; #if K_QUANTS_PER_ITERATION == 2 uint32_t q32[4]; const uint8_t * q4 = (const uint8_t *)q32; #else uint16_t q16[4]; const uint8_t * q4 = (const uint8_t *)q16; #endif float tmp = 0; // partial sum for thread in warp for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { const float * y1 = yy + i*QK_K + y_offset; const float * y2 = y1 + 128; const float dall = __low2half(x[i].dm); const float dmin = __high2half(x[i].dm); const uint16_t * a = (const uint16_t *)x[i].scales; aux[0] = a[im+0] & kmask1; aux[1] = a[im+2] & kmask1; aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2); aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2); #if K_QUANTS_PER_ITERATION == 2 const uint32_t * q1 = (const uint32_t *)(x[i].qs + q_offset); const uint32_t * q2 = q1 + 16; q32[0] = q1[0] & 0x0f0f0f0f; q32[1] = q1[0] & 0xf0f0f0f0; q32[2] = q2[0] & 0x0f0f0f0f; q32[3] = q2[0] & 0xf0f0f0f0; float4 s = {0.f, 0.f, 0.f, 0.f}; float smin = 0; for (int l = 0; l < 4; ++l) { s.x += y1[l] * q4[l+0]; s.y += y1[l+32] * q4[l+ 4]; s.z += y2[l] * q4[l+8]; s.w += y2[l+32] * q4[l+12]; smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7]; } tmp += dall * (s.x * sc[0] + s.y * sc[1] * 1.f/16.f + s.z * sc[4] + s.w * sc[5] * 1.f/16.f) - dmin * smin; #else const uint16_t * q1 = (const uint16_t *)(x[i].qs + q_offset); const uint16_t * q2 = q1 + 32; q16[0] = q1[0] & 0x0f0f; q16[1] = q1[0] & 0xf0f0; q16[2] = q2[0] & 0x0f0f; q16[3] = q2[0] & 0xf0f0; float4 s = {0.f, 0.f, 0.f, 0.f}; float smin = 0; for (int l = 0; l < 2; ++l) { s.x += y1[l] * q4[l+0]; s.y += y1[l+32] * q4[l+2]; s.z += y2[l] * q4[l+4]; s.w += y2[l+32] * q4[l+6]; smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7]; } tmp += dall * (s.x * sc[0] + s.y * sc[1] * 1.f/16.f + s.z * sc[4] + s.w * sc[5] * 1.f/16.f) - dmin * smin; #endif } #else const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); const int step = tid * K_QUANTS_PER_ITERATION; uint16_t aux16[2]; const uint8_t * s = (const uint8_t *)aux16; float tmp = 0; for (int i = ix; i < 
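/*
 * This branch is the GGML_QKK_64 variant of q4_K: a super-block holds only 64 values,
 * the scales/mins are 4-bit values unpacked through the 0x0f0f masks into aux16, and the
 * super-scale and super-min are read directly from the two-element half array dm.
 */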
num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { const uint8_t * q = x[i].qs + step; const float * y = yy + i*QK_K + step; const uint16_t * a = (const uint16_t *)x[i].scales; aux16[0] = a[0] & 0x0f0f; aux16[1] = (a[0] >> 4) & 0x0f0f; const float d = (float)x[i].dm[0]; const float m = (float)x[i].dm[1]; float sum = 0.f; for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) { sum += y[j+ 0] * (d * s[0] * (q[j+ 0] & 0xF) - m * s[2]) + y[j+16] * (d * s[0] * (q[j+16] & 0xF) - m * s[2]) + y[j+32] * (d * s[1] * (q[j+ 0] >> 4) - m * s[3]) + y[j+48] * (d * s[1] * (q[j+16] >> 4) - m * s[3]); } tmp += sum; } #endif // sum up partial sums and write back result #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); } if (tid == 0) { dst[row] = tmp; } } extern "C" __global__ void dequantize_mul_mat_vec_q5_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols) { const int row = blockIdx.x; const int num_blocks_per_row = ncols / QK_K; const int ib0 = row*num_blocks_per_row; const block_q5_K * x = (const block_q5_K *)vx + ib0; float tmp = 0; // partial sum for thread in warp #if QK_K == 256 const uint16_t kmask1 = 0x3f3f; const uint16_t kmask2 = 0x0f0f; const uint16_t kmask3 = 0xc0c0; const int tid = threadIdx.x/2; // 0...15 const int ix = threadIdx.x%2; const int il = tid/4; // 0...3 const int ir = tid - 4*il;// 0...3 const int n = 2; const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224 const int in = il%2; const int l0 = n*(2*ir + in); const int q_offset = 32*im + l0; const int y_offset = 64*im + l0; const uint8_t hm1 = 1 << (2*im); const uint8_t hm2 = hm1 << 4; uint16_t aux[4]; const uint8_t * sc = (const uint8_t *)aux; uint16_t q16[8]; const uint8_t * q4 = (const uint8_t *)q16; for (int i = ix; i < num_blocks_per_row; i += 2) { const uint8_t * ql1 = x[i].qs + q_offset; const uint8_t * qh = x[i].qh + l0; const float * y1 = yy + i*QK_K + y_offset; const float * y2 = y1 + 128; const float dall = __low2half(x[i].dm); const float dmin = __high2half(x[i].dm); const uint16_t * a = (const uint16_t *)x[i].scales; aux[0] = a[im+0] & kmask1; aux[1] = a[im+2] & kmask1; aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2); aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2); float4 sum = {0.f, 0.f, 0.f, 0.f}; float smin = 0; const uint16_t * q1 = (const uint16_t *)ql1; const uint16_t * q2 = q1 + 32; q16[0] = q1[0] & 0x0f0f; q16[1] = q1[8] & 0x0f0f; q16[2] = (q1[0] >> 4) & 0x0f0f; q16[3] = (q1[8] >> 4) & 0x0f0f; q16[4] = q2[0] & 0x0f0f; q16[5] = q2[8] & 0x0f0f; q16[6] = (q2[0] >> 4) & 0x0f0f; q16[7] = (q2[8] >> 4) & 0x0f0f; for (int l = 0; l < n; ++l) { sum.x += y1[l+ 0] * (q4[l +0] + (qh[l+ 0] & (hm1 << 0) ? 16 : 0)) + y1[l+16] * (q4[l +2] + (qh[l+16] & (hm1 << 0) ? 16 : 0)); sum.y += y1[l+32] * (q4[l +4] + (qh[l+ 0] & (hm1 << 1) ? 16 : 0)) + y1[l+48] * (q4[l +6] + (qh[l+16] & (hm1 << 1) ? 16 : 0)); sum.z += y2[l+ 0] * (q4[l +8] + (qh[l+ 0] & (hm2 << 0) ? 16 : 0)) + y2[l+16] * (q4[l+10] + (qh[l+16] & (hm2 << 0) ? 16 : 0)); sum.w += y2[l+32] * (q4[l+12] + (qh[l+ 0] & (hm2 << 1) ? 16 : 0)) + y2[l+48] * (q4[l+14] + (qh[l+16] & (hm2 << 1) ? 
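/*
 * q5_K, QK_K == 256 path: each iteration covers four 32-value groups (at y1, y1 + 32,
 * y2 and y2 + 32, with y2 = y1 + 128). hm1 (and hm1 << 1) select the fifth-bit planes of
 * qh for the two y1 groups, hm2 = hm1 << 4 (and hm2 << 1) for the two y2 groups, and
 * smin gathers the per-group min contributions that are subtracted once at the end as
 * dmin * smin.
 */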
16 : 0)); smin += (y1[l] + y1[l+16]) * sc[2] + (y1[l+32] + y1[l+48]) * sc[3] + (y2[l] + y2[l+16]) * sc[6] + (y2[l+32] + y2[l+48]) * sc[7]; } tmp += dall * (sum.x * sc[0] + sum.y * sc[1] + sum.z * sc[4] + sum.w * sc[5]) - dmin * smin; } #else const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); const int step = tid * K_QUANTS_PER_ITERATION; const int im = step/8; const int in = step%8; for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { const uint8_t * q = x[i].qs + step; const int8_t * s = x[i].scales; const float * y = yy + i*QK_K + step; const float d = x[i].d; float sum = 0.f; for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) { const uint8_t h = x[i].qh[in+j] >> im; sum += y[j+ 0] * d * s[0] * ((q[j+ 0] & 0xF) - ((h >> 0) & 1 ? 0 : 16)) + y[j+16] * d * s[1] * ((q[j+16] & 0xF) - ((h >> 2) & 1 ? 0 : 16)) + y[j+32] * d * s[2] * ((q[j+ 0] >> 4) - ((h >> 4) & 1 ? 0 : 16)) + y[j+48] * d * s[3] * ((q[j+16] >> 4) - ((h >> 6) & 1 ? 0 : 16)); } tmp += sum; } #endif // sum up partial sums and write back result #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); } if (threadIdx.x == 0) { dst[row] = tmp; } } extern "C" __global__ void dequantize_mul_mat_vec_q6_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) { static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION"); const int row = blockIdx.x*blockDim.y + threadIdx.y; if (row > nrows) return; const int num_blocks_per_row = ncols / QK_K; const int ib0 = row*num_blocks_per_row; const block_q6_K * x = (const block_q6_K *)vx + ib0; #if QK_K == 256 const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16 const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0, 1 const int step = 16/K_QUANTS_PER_ITERATION; // 16 or 8 const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128... 
const int in = tid - step*im; // 0...15 or 0...7 #if K_QUANTS_PER_ITERATION == 1 const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15 const int is = 0; #else const int l0 = 4 * in; // 0, 4, 8, ..., 28 const int is = in / 4; #endif const int ql_offset = 64*im + l0; const int qh_offset = 32*im + l0; const int s_offset = 8*im + is; const int y_offset = 128*im + l0; float tmp = 0; // partial sum for thread in warp for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + y_offset; const uint8_t * ql = x[i].ql + ql_offset; const uint8_t * qh = x[i].qh + qh_offset; const int8_t * s = x[i].scales + s_offset; const float d = x[i].d; #if K_QUANTS_PER_ITERATION == 1 float sum = y[ 0] * s[0] * d * ((int8_t)((ql[ 0] & 0xF) | ((qh[ 0] & 0x03) << 4)) - 32) + y[16] * s[1] * d * ((int8_t)((ql[16] & 0xF) | ((qh[16] & 0x03) << 4)) - 32) + y[32] * s[2] * d * ((int8_t)((ql[32] & 0xF) | ((qh[ 0] & 0x0c) << 2)) - 32) + y[48] * s[3] * d * ((int8_t)((ql[48] & 0xF) | ((qh[16] & 0x0c) << 2)) - 32) + y[64] * s[4] * d * ((int8_t)((ql[ 0] >> 4) | ((qh[ 0] & 0x30) >> 0)) - 32) + y[80] * s[5] * d * ((int8_t)((ql[16] >> 4) | ((qh[16] & 0x30) >> 0)) - 32) + y[96] * s[6] * d * ((int8_t)((ql[32] >> 4) | ((qh[ 0] & 0xc0) >> 2)) - 32) +y[112] * s[7] * d * ((int8_t)((ql[48] >> 4) | ((qh[16] & 0xc0) >> 2)) - 32); tmp += sum; #else float sum = 0; for (int l = 0; l < 4; ++l) { sum += y[l+ 0] * s[0] * d * ((int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32) + y[l+32] * s[2] * d * ((int8_t)((ql[l+32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32) + y[l+64] * s[4] * d * ((int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32) + y[l+96] * s[6] * d * ((int8_t)((ql[l+32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32); } tmp += sum; #endif } #else const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...7 const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0...3 const int step = tid * K_QUANTS_PER_ITERATION; float tmp = 0; // partial sum for thread in warp for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + step; const uint8_t * ql = x[i].ql + step; const uint8_t * qh = x[i].qh + step; const int8_t * s = x[i].scales; const float d = x[i+0].d; float sum = 0; for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) { sum += y[j+ 0] * s[0] * d * ((int8_t)((ql[j+ 0] & 0xF) | ((qh[j] & 0x03) << 4)) - 32) + y[j+16] * s[1] * d * ((int8_t)((ql[j+16] & 0xF) | ((qh[j] & 0x0c) << 2)) - 32) + y[j+32] * s[2] * d * ((int8_t)((ql[j+ 0] >> 4) | ((qh[j] & 0x30) >> 0)) - 32) + y[j+48] * s[3] * d * ((int8_t)((ql[j+16] >> 4) | ((qh[j] & 0xc0) >> 2)) - 32); } tmp += sum; } #endif // sum up partial sums and write back result #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); } if (tid == 0) { dst[row] = tmp; } } // VDR = vec dot ratio, how many contiguous integers each thread processes when the vec dot kernel is called // MMVQ = mul_mat_vec_q, MMQ = mul_mat_q #define VDR_Q4_0_Q8_1_MMVQ 2 #define VDR_Q4_0_Q8_1_MMQ 4 template <int vdr> static __device__ __forceinline__ float vec_dot_q4_0_q8_1_impl( const int * v, const int * u, const float & d4, const half2 & ds8) { int sumi = 0; #pragma unroll for (int i = 0; i < vdr; ++i) { const int vi0 = (v[i] >> 0) & 0x0F0F0F0F; const int vi1 = (v[i] >> 4) & 0x0F0F0F0F; // SIMD dot product of quantized values sumi = __dp4a(vi0, u[2*i+0], sumi); sumi = __dp4a(vi1, u[2*i+1], sumi); } const float2 ds8f = __half22float2(ds8); // second part effectively 
subtracts 8 from each quant value return d4 * (sumi * ds8f.x - (8*vdr/QI4_0) * ds8f.y); } #define VDR_Q4_1_Q8_1_MMVQ 2 #define VDR_Q4_1_Q8_1_MMQ 4 template <int vdr> static __device__ __forceinline__ float vec_dot_q4_1_q8_1_impl( const int * v, const int * u, const half2 & dm4, const half2 & ds8) { int sumi = 0; #pragma unroll for (int i = 0; i < vdr; ++i) { const int vi0 = (v[i] >> 0) & 0x0F0F0F0F; const int vi1 = (v[i] >> 4) & 0x0F0F0F0F; // SIMD dot product of quantized values sumi = __dp4a(vi0, u[2*i+0], sumi); sumi = __dp4a(vi1, u[2*i+1], sumi); } #ifdef GGML_CUDA_F16 const float2 tmp = __half22float2(__hmul2(dm4, ds8)); const float d4d8 = tmp.x; const float m4s8 = tmp.y; #else const float2 dm4f = __half22float2(dm4); const float2 ds8f = __half22float2(ds8); const float d4d8 = dm4f.x * ds8f.x; const float m4s8 = dm4f.y * ds8f.y; #endif // GGML_CUDA_F16 // scale second part of sum by QI8_1/(vdr * QR4_1) to compensate for multiple threads adding it return sumi * d4d8 + m4s8 / (QI8_1 / (vdr * QR4_1)); } #define VDR_Q5_0_Q8_1_MMVQ 2 #define VDR_Q5_0_Q8_1_MMQ 4 template <int vdr> static __device__ __forceinline__ float vec_dot_q5_0_q8_1_impl( const int * vl, const int * vh, const int * u, const float & d5, const half2 & ds8) { int sumi = 0; #pragma unroll for (int i = 0; i < vdr; ++i) { int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits vi0 |= (vh[i] << 4) & 0x00000010; // 0 -> 4 vi0 |= (vh[i] << 11) & 0x00001000; // 1 -> 12 vi0 |= (vh[i] << 18) & 0x00100000; // 2 -> 20 vi0 |= (vh[i] << 25) & 0x10000000; // 3 -> 28 sumi = __dp4a(vi0, u[2*i+0], sumi); // SIMD dot product of quantized values int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits vi1 |= (vh[i] >> 12) & 0x00000010; // 16 -> 4 vi1 |= (vh[i] >> 5) & 0x00001000; // 17 -> 12 vi1 |= (vh[i] << 2) & 0x00100000; // 18 -> 20 vi1 |= (vh[i] << 9) & 0x10000000; // 19 -> 28 sumi = __dp4a(vi1, u[2*i+1], sumi); // SIMD dot product of quantized values } const float2 ds8f = __half22float2(ds8); // second part effectively subtracts 16 from each quant value return d5 * (sumi * ds8f.x - (16*vdr/QI5_0) * ds8f.y); } #define VDR_Q5_1_Q8_1_MMVQ 2 #define VDR_Q5_1_Q8_1_MMQ 4 template <int vdr> static __device__ __forceinline__ float vec_dot_q5_1_q8_1_impl( const int * vl, const int * vh, const int * u, const half2 & dm5, const half2 & ds8) { int sumi = 0; #pragma unroll for (int i = 0; i < vdr; ++i) { int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits vi0 |= (vh[i] << 4) & 0x00000010; // 0 -> 4 vi0 |= (vh[i] << 11) & 0x00001000; // 1 -> 12 vi0 |= (vh[i] << 18) & 0x00100000; // 2 -> 20 vi0 |= (vh[i] << 25) & 0x10000000; // 3 -> 28 sumi = __dp4a(vi0, u[2*i+0], sumi); // SIMD dot product of quantized values int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits vi1 |= (vh[i] >> 12) & 0x00000010; // 16 -> 4 vi1 |= (vh[i] >> 5) & 0x00001000; // 17 -> 12 vi1 |= (vh[i] << 2) & 0x00100000; // 18 -> 20 vi1 |= (vh[i] << 9) & 0x10000000; // 19 -> 28 sumi = __dp4a(vi1, u[2*i+1], sumi); // SIMD dot product of quantized values } #ifdef GGML_CUDA_F16 const float2 tmp = __half22float2(__hmul2(dm5, ds8)); const float d5d8 = tmp.x; const float m5s8 = tmp.y; #else const float2 dm5f = __half22float2(dm5); const float2 ds8f = __half22float2(ds8); const float d5d8 = dm5f.x * ds8f.x; const float m5s8 = dm5f.y * ds8f.y; #endif // GGML_CUDA_F16 // scale second part of sum by QI5_1 / vdr to compensate for multiple threads adding it return sumi*d5d8 + m5s8 / 
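/*
 * As used throughout these impls, the half2 ds of a block_q8_1 appears to pack the block
 * scale d8 together with the block's precomputed value sum. That .y half lets the
 * constant part of the weight format (the -8 or -16 offset of q4_0/q5_0, or the min term
 * of q4_1/q5_1) be folded in without a second dot product; since each thread only covers
 * vdr of the block's ints, the constant term is scaled down (here by QI5_1 / vdr) so it
 * is counted exactly once when the per-thread results are summed.
 */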
(QI5_1 / vdr); } #define VDR_Q8_0_Q8_1_MMVQ 2 #define VDR_Q8_0_Q8_1_MMQ 8 template <int vdr> static __device__ __forceinline__ float vec_dot_q8_0_q8_1_impl( const int * v, const int * u, const float & d8_0, const float & d8_1) { int sumi = 0; #pragma unroll for (int i = 0; i < vdr; ++i) { // SIMD dot product of quantized values sumi = __dp4a(v[i], u[i], sumi); } return d8_0*d8_1 * sumi; } template <int vdr> static __device__ __forceinline__ float vec_dot_q8_1_q8_1_impl( const int * v, const int * u, const half2 & dm8, const half2 & ds8) { int sumi = 0; #pragma unroll for (int i = 0; i < vdr; ++i) { // SIMD dot product of quantized values sumi = __dp4a(v[i], u[i], sumi); } #ifdef GGML_CUDA_F16 const float2 tmp = __half22float2(__hmul2(dm8, ds8)); const float d8d8 = tmp.x; const float m8s8 = tmp.y; #else const float2 dm8f = __half22float2(dm8); const float2 ds8f = __half22float2(ds8); const float d8d8 = dm8f.x * ds8f.x; const float m8s8 = dm8f.y * ds8f.y; #endif // GGML_CUDA_F16 // scale second part of sum by QI8_1/ vdr to compensate for multiple threads adding it return sumi*d8d8 + m8s8 / (QI8_1 / vdr); } #define VDR_Q2_K_Q8_1_MMVQ 1 #define VDR_Q2_K_Q8_1_MMQ 2 // contiguous v/x values static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmvq( const int & v, const int * __restrict__ u, const uint8_t * __restrict__ scales, const half2 & dm2, const float * __restrict__ d8) { float sumf_d = 0.0f; float sumf_m = 0.0f; #pragma unroll for (int i = 0; i < QR2_K; ++i) { const int sc = scales[2*i]; const int vi = (v >> (2*i)) & 0x03030303; sumf_d += d8[i] * (__dp4a(vi, u[i], 0) * (sc & 0xF)); // SIMD dot product // fill int with 4x m int m = sc >> 4; m |= m << 8; m |= m << 16; sumf_m += d8[i] * __dp4a(m, u[i], 0); // multiply constant q2_K part with sum of q8_1 values } const float2 dm2f = __half22float2(dm2); return dm2f.x*sumf_d - dm2f.y*sumf_m; } // contiguous u/y values static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ scales, const half2 & dm2, const float & d8) { int sumi_d = 0; int sumi_m = 0; #pragma unroll for (int i0 = 0; i0 < QI8_1; i0 += QI8_1/2) { int sumi_d_sc = 0; const int sc = scales[i0 / (QI8_1/2)]; // fill int with 4x m int m = sc >> 4; m |= m << 8; m |= m << 16; #pragma unroll for (int i = i0; i < i0 + QI8_1/2; ++i) { sumi_d_sc = __dp4a(v[i], u[i], sumi_d_sc); // SIMD dot product sumi_m = __dp4a(m, u[i], sumi_m); // multiply sum of q8_1 values with m } sumi_d += sumi_d_sc * (sc & 0xF); } const float2 dm2f = __half22float2(dm2); return d8 * (dm2f.x*sumi_d - dm2f.y*sumi_m); } #define VDR_Q3_K_Q8_1_MMVQ 1 #define VDR_Q3_K_Q8_1_MMQ 2 // contiguous v/x values static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmvq( const int & vl, const int & vh, const int * __restrict__ u, const uint8_t * __restrict__ scales, const int & scale_offset, const float & d3, const float * __restrict__ d8) { float sumf = 0.0f; #pragma unroll for (int i = 0; i < QR3_K; ++i) { const int isc = scale_offset + 2*i; const int isc_low = isc % (QK_K/32); const int sc_shift_low = 4 * (isc / (QK_K/32)); const int sc_low = (scales[isc_low] >> sc_shift_low) & 0xF; const int isc_high = isc % (QK_K/64); const int sc_shift_high = 2 * (isc / (QK_K/64)); const int sc_high = ((scales[(QK_K/32) + isc_high] >> sc_shift_high) & 3) << 4; const int sc = (sc_low | sc_high) - 32; const int vil = (vl >> (2*i)) & 0x03030303; const int vih = ((vh >> i) << 2) & 0x04040404; const int vi = __vsubss4(vil, vih); sumf 
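/*
 * vec_dot_q3_K_q8_1_impl_mmvq: sc is the 6-bit sub-block scale reassembled from its
 * 4-bit low part and 2-bit high part, then centered with -32. vih holds 4 in every byte
 * lane whose (pre-inverted) hmask bit is clear, so __vsubss4 applies the "subtract 4
 * when the high bit is absent" correction before the dp4a dot product accumulated here.
 */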
+= d8[i] * (__dp4a(vi, u[i], 0) * sc); // SIMD dot product } return d3 * sumf; } // contiguous u/y values static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ scales, const float & d3, const float & d8) { int sumi = 0; #pragma unroll for (int i0 = 0; i0 < QR3_K*VDR_Q3_K_Q8_1_MMQ; i0 += QI8_1/2) { int sumi_sc = 0; for (int i = i0; i < i0 + QI8_1/2; ++i) { sumi_sc = __dp4a(v[i], u[i], sumi_sc); // SIMD dot product } sumi += sumi_sc * scales[i0 / (QI8_1/2)]; } return d3*d8 * sumi; } #define VDR_Q4_K_Q8_1_MMVQ 2 #define VDR_Q4_K_Q8_1_MMQ 8 // contiguous v/x values static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_vmmq( const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc, const uint8_t * __restrict__ m, const half2 & dm4, const float * __restrict__ d8) { float sumf_d = 0.0f; float sumf_m = 0.0f; #pragma unroll for (int i = 0; i < QR4_K; ++i) { const int v0i = (v[0] >> (4*i)) & 0x0F0F0F0F; const int v1i = (v[1] >> (4*i)) & 0x0F0F0F0F; const int dot1 = __dp4a(v1i, u[2*i+1], __dp4a(v0i, u[2*i+0], 0)); // SIMD dot product const int dot2 = __dp4a(0x01010101, u[2*i+1], __dp4a(0x01010101, u[2*i+0], 0)); // sum of u sumf_d += d8[i] * (dot1 * sc[i]); sumf_m += d8[i] * (dot2 * m[i]); // multiply constant part of q4_K with sum of q8_1 values } const float2 dm4f = __half22float2(dm4); return dm4f.x*sumf_d - dm4f.y*sumf_m; } // contiguous u/y values static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc, const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) { float sumf_d = 0.0f; float sumf_m = 0.0f; #pragma unroll for (int i = 0; i < QR4_K*VDR_Q4_K_Q8_1_MMQ/QI8_1; ++i) { int sumi_d = 0; #pragma unroll for (int j = 0; j < QI8_1; ++j) { sumi_d = __dp4a((v[j] >> (4*i)) & 0x0F0F0F0F, u[i*QI8_1 + j], sumi_d); // SIMD dot product } const float2 ds8f = __half22float2(ds8[i]); sumf_d += ds8f.x * (sc[i] * sumi_d); sumf_m += ds8f.y * m[i]; // sum of q8_1 block * q4_K min val } const float2 dm4f = __half22float2(dm4); return dm4f.x*sumf_d - dm4f.y*sumf_m; } #define VDR_Q5_K_Q8_1_MMVQ 2 #define VDR_Q5_K_Q8_1_MMQ 8 // contiguous v/x values static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_vmmq( const int * __restrict__ vl, const int * __restrict__ vh, const int * __restrict__ u, const uint8_t * __restrict__ sc, const uint8_t * __restrict__ m, const half2 & dm5, const float * __restrict__ d8) { float sumf_d = 0.0f; float sumf_m = 0.0f; #pragma unroll for (int i = 0; i < QR5_K; ++i) { const int vl0i = (vl[0] >> (4*i)) & 0x0F0F0F0F; const int vl1i = (vl[1] >> (4*i)) & 0x0F0F0F0F; const int vh0i = ((vh[0] >> i) << 4) & 0x10101010; const int vh1i = ((vh[1] >> i) << 4) & 0x10101010; const int v0i = vl0i | vh0i; const int v1i = vl1i | vh1i; const int dot1 = __dp4a(v0i, u[2*i+0], __dp4a(v1i, u[2*i+1], 0)); // SIMD dot product const int dot2 = __dp4a(0x01010101, u[2*i+0], __dp4a(0x01010101, u[2*i+1], 0)); // sum of u sumf_d += d8[i] * (dot1 * sc[i]); sumf_m += d8[i] * (dot2 * m[i]); } const float2 dm5f = __half22float2(dm5); return dm5f.x*sumf_d - dm5f.y*sumf_m; } // contiguous u/y values static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc, const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) { float sumf_d = 
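/*
 * The *_impl_mmq variants are used from mul_mat_q, where the y values are contiguous in
 * shared memory and ds8 is an array of per-q8_1-block (d, s) pairs. sumf_d accumulates
 * the scale * dot terms and sumf_m the min * value-sum terms; the result is combined as
 * dm.x * sumf_d - dm.y * sumf_m.
 */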
0.0f; float sumf_m = 0.0f; #pragma unroll for (int i = 0; i < QR5_K*VDR_Q5_K_Q8_1_MMQ/QI8_1; ++i) { int sumi_d = 0; #pragma unroll for (int j = 0; j < QI8_1; ++j) { sumi_d = __dp4a(v[i*QI8_1 + j], u[i*QI8_1 + j], sumi_d); // SIMD dot product } const float2 ds8f = __half22float2(ds8[i]); sumf_d += ds8f.x * (sc[i] * sumi_d); sumf_m += ds8f.y * m[i]; // sum of q8_1 block * q4_K min val } const float2 dm4f = __half22float2(dm4); return dm4f.x*sumf_d - dm4f.y*sumf_m; } #define VDR_Q6_K_Q8_1_MMVQ 1 #define VDR_Q6_K_Q8_1_MMQ 8 // contiguous v/x values static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmvq( const int & vl, const int & vh, const int * __restrict__ u, const int8_t * __restrict__ scales, const float & d, const float * __restrict__ d8) { float sumf = 0.0f; #pragma unroll for (int i = 0; i < QR6_K; ++i) { const int sc = scales[4*i]; const int vil = (vl >> (4*i)) & 0x0F0F0F0F; const int vih = ((vh >> (4*i)) << 4) & 0x30303030; const int vi = __vsubss4((vil | vih), 0x20202020); // vi = (vil | vih) - 32 sumf += d8[i] * (__dp4a(vi, u[i], 0) * sc); // SIMD dot product } return d*sumf; } // contiguous u/y values static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ sc, const float & d6, const float * __restrict__ d8) { float sumf_d = 0.0f; #pragma unroll for (int i0 = 0; i0 < VDR_Q6_K_Q8_1_MMQ; i0 += 4) { int2 sumi_d = {0, 0}; // 2 q6_K scales per q8_1 scale #pragma unroll for (int i = i0; i < i0 + 2; ++i) { sumi_d.x = __dp4a(v[2*i+0], u[2*i+0], sumi_d.x); // SIMD dot product sumi_d.x = __dp4a(v[2*i+1], u[2*i+1], sumi_d.x); // SIMD dot product sumi_d.y = __dp4a(v[2*i+4], u[2*i+4], sumi_d.y); // SIMD dot product sumi_d.y = __dp4a(v[2*i+5], u[2*i+5], sumi_d.y); // SIMD dot product } sumf_d += d8[i0/4] * (sc[i0/2+0]*sumi_d.x + sc[i0/2+1]*sumi_d.y); } return d6 * sumf_d; } static __device__ __forceinline__ float vec_dot_q4_0_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { const block_q4_0 * bq4_0 = (const block_q4_0 *) vbq; int v[VDR_Q4_0_Q8_1_MMVQ]; int u[2*VDR_Q4_0_Q8_1_MMVQ]; #pragma unroll for (int i = 0; i < VDR_Q4_0_Q8_1_MMVQ; ++i) { v[i] = get_int_from_uint8(bq4_0->qs, iqs + i); u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i); u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI4_0); } return vec_dot_q4_0_q8_1_impl<VDR_Q4_0_Q8_1_MMVQ>(v, u, bq4_0->d, bq8_1->ds); } static __device__ __forceinline__ float vec_dot_q4_1_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { const block_q4_1 * bq4_1 = (const block_q4_1 *) vbq; int v[VDR_Q4_1_Q8_1_MMVQ]; int u[2*VDR_Q4_1_Q8_1_MMVQ]; #pragma unroll for (int i = 0; i < VDR_Q4_1_Q8_1_MMVQ; ++i) { v[i] = get_int_from_uint8_aligned(bq4_1->qs, iqs + i); u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i); u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI4_1); } return vec_dot_q4_1_q8_1_impl<VDR_Q4_1_Q8_1_MMVQ>(v, u, bq4_1->dm, bq8_1->ds); } static __device__ __forceinline__ float vec_dot_q5_0_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { const block_q5_0 * bq5_0 = (const block_q5_0 *) vbq; int vl[VDR_Q5_0_Q8_1_MMVQ]; int vh[VDR_Q5_0_Q8_1_MMVQ]; int u[2*VDR_Q5_0_Q8_1_MMVQ]; #pragma unroll for (int i = 0; i < VDR_Q5_0_Q8_1_MMVQ; ++i) { vl[i] = get_int_from_uint8(bq5_0->qs, iqs + i); vh[i] = get_int_from_uint8(bq5_0->qh, 0) >> (4 * (iqs + i)); u[2*i+0] = 
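/*
 * The vec_dot_*_q8_1 entry points implement the MMVQ (mat-vec) path: each call gathers
 * vdr 32-bit chunks of weight quants (get_int_from_uint8 for the unaligned qs of
 * q4_0/q5_0, the aligned variants otherwise) plus the matching chunks of the q8_1 block,
 * where iqs + i addresses the partner of the low nibbles and iqs + i + QI5_0 (here) the
 * partner of the high nibbles, before deferring to the corresponding *_impl.
 */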
get_int_from_int8_aligned(bq8_1->qs, iqs + i); u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI5_0); } return vec_dot_q5_0_q8_1_impl<VDR_Q5_0_Q8_1_MMVQ>(vl, vh, u, bq5_0->d, bq8_1->ds); } static __device__ __forceinline__ float vec_dot_q5_1_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { const block_q5_1 * bq5_1 = (const block_q5_1 *) vbq; int vl[VDR_Q5_1_Q8_1_MMVQ]; int vh[VDR_Q5_1_Q8_1_MMVQ]; int u[2*VDR_Q5_1_Q8_1_MMVQ]; #pragma unroll for (int i = 0; i < VDR_Q5_1_Q8_1_MMVQ; ++i) { vl[i] = get_int_from_uint8_aligned(bq5_1->qs, iqs + i); vh[i] = get_int_from_uint8_aligned(bq5_1->qh, 0) >> (4 * (iqs + i)); u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i); u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI5_1); } return vec_dot_q5_1_q8_1_impl<VDR_Q5_1_Q8_1_MMVQ>(vl, vh, u, bq5_1->dm, bq8_1->ds); } static __device__ __forceinline__ float vec_dot_q8_0_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { const block_q8_0 * bq8_0 = (const block_q8_0 *) vbq; int v[VDR_Q8_0_Q8_1_MMVQ]; int u[VDR_Q8_0_Q8_1_MMVQ]; #pragma unroll for (int i = 0; i < VDR_Q8_0_Q8_1_MMVQ; ++i) { v[i] = get_int_from_int8(bq8_0->qs, iqs + i); u[i] = get_int_from_int8_aligned(bq8_1->qs, iqs + i); } return vec_dot_q8_0_q8_1_impl<VDR_Q8_0_Q8_1_MMVQ>(v, u, bq8_0->d, __low2half(bq8_1->ds)); } static __device__ __forceinline__ float vec_dot_q2_K_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { const block_q2_K * bq2_K = (const block_q2_K *) vbq; const int bq8_offset = QR2_K * (iqs / QI8_1); const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2); const uint8_t * scales = bq2_K->scales + scale_offset; const int v = get_int_from_uint8_aligned(bq2_K->qs, iqs); int u[QR2_K]; float d8[QR2_K]; #pragma unroll for (int i = 0; i < QR2_K; ++ i) { u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1); d8[i] = __low2float(bq8_1[bq8_offset + i].ds); } return vec_dot_q2_K_q8_1_impl_mmvq(v, u, scales, bq2_K->dm, d8); } static __device__ __forceinline__ float vec_dot_q3_K_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { const block_q3_K * bq3_K = (const block_q3_K *) vbq; const int bq8_offset = QR3_K * (iqs / (QI3_K/2)); const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2); const float d = bq3_K->d; const int vl = get_int_from_uint8(bq3_K->qs, iqs); // invert the mask with ~ so that a 0/1 results in 4/0 being subtracted const int vh = ~get_int_from_uint8(bq3_K->hmask, iqs % (QI3_K/2)) >> bq8_offset; int u[QR3_K]; float d8[QR3_K]; #pragma unroll for (int i = 0; i < QR3_K; ++i) { u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1); d8[i] = __low2float(bq8_1[bq8_offset + i].ds); } return vec_dot_q3_K_q8_1_impl_mmvq(vl, vh, u, bq3_K->scales, scale_offset, d, d8); } static __device__ __forceinline__ float vec_dot_q4_K_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { #ifndef GGML_QKK_64 const block_q4_K * bq4_K = (const block_q4_K *) vbq; int v[2]; int u[2*QR4_K]; float d8[QR4_K]; // iqs is in 0,2..30. 
bq8_offset = iqs/4 -> bq8_offset = 0, 2, 4, 6 const int bq8_offset = QR4_K * ((iqs/2) / (QI8_1/2)); // iqs = 0....3 -> bq8_offset = 0, want q4_offset = 0, 4, 8, 12 // iqs = 4....7 -> bq8_offset = 2, want q4_offset = 32, 36, 40, 44 // iqs = 8...11 -> bq8_offset = 4, want q4_offset = 64, 68, 72, 76 // iqs = 12..15 -> bq8_offset = 6, want q4_offset = 96, 100, 104, 108 const int * q4 = (const int *)(bq4_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4)); v[0] = q4[0]; v[1] = q4[4]; const uint16_t * scales = (const uint16_t *)bq4_K->scales; uint16_t aux[2]; const int j = bq8_offset/2; if (j < 2) { aux[0] = scales[j+0] & 0x3f3f; aux[1] = scales[j+2] & 0x3f3f; } else { aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 2); aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2); } const uint8_t * sc = (const uint8_t *)aux; const uint8_t * m = sc + 2; for (int i = 0; i < QR4_K; ++i) { const block_q8_1 * bq8i = bq8_1 + bq8_offset + i; d8[i] = __low2float(bq8i->ds); const int * q8 = (const int *)bq8i->qs + ((iqs/2)%4); u[2*i+0] = q8[0]; u[2*i+1] = q8[4]; } return vec_dot_q4_K_q8_1_impl_vmmq(v, u, sc, m, bq4_K->dm, d8); #else const block_q4_K * bq4_K = (const block_q4_K *) vbq; float sumf_d = 0.0f; float sumf_m = 0.0f; uint16_t aux16[2]; const uint8_t * s = (const uint8_t *)aux16; const uint16_t * a = (const uint16_t *)bq4_K->scales; aux16[0] = a[0] & 0x0f0f; aux16[1] = (a[0] >> 4) & 0x0f0f; const float dall = bq4_K->dm[0]; const float dmin = bq4_K->dm[1]; const float d8_1 = __low2float(bq8_1[0].ds); const float d8_2 = __low2float(bq8_1[1].ds); const int ui1 = *((const int *)bq8_1[0].qs + (iqs/2)); const int ui2 = *((const int *)bq8_1[0].qs + (iqs/2) + 4); const int ui3 = *((const int *)bq8_1[1].qs + (iqs/2)); const int ui4 = *((const int *)bq8_1[1].qs + (iqs/2) + 4); const int * q4 = (const int *)bq4_K->qs + (iqs/2); const int v1 = q4[0]; const int v2 = q4[4]; const int dot1 = __dp4a(ui2, v2 & 0x0f0f0f0f, __dp4a(ui1, v1 & 0x0f0f0f0f, 0)); const int dot2 = __dp4a(ui4, (v2 >> 4) & 0x0f0f0f0f, __dp4a(ui3, (v1 >> 4) & 0x0f0f0f0f, 0)); const int dot3 = __dp4a(0x01010101, ui2, __dp4a(0x01010101, ui1, 0)); const int dot4 = __dp4a(0x01010101, ui4, __dp4a(0x01010101, ui3, 0)); sumf_d += d8_1 * (dot1 * s[0]) + d8_2 * (dot2 * s[1]); sumf_m += d8_1 * (dot3 * s[2]) + d8_2 * (dot4 * s[3]); return dall * sumf_d - dmin * sumf_m; #endif } static __device__ __forceinline__ float vec_dot_q5_K_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { #ifndef GGML_QKK_64 const block_q5_K * bq5_K = (const block_q5_K *) vbq; int vl[2]; int vh[2]; int u[2*QR5_K]; float d8[QR5_K]; const int bq8_offset = QR5_K * ((iqs/2) / (QI8_1/2)); const int * ql = (const int *)(bq5_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4)); const int * qh = (const int *)(bq5_K->qh + 4 * ((iqs/2)%4)); vl[0] = ql[0]; vl[1] = ql[4]; vh[0] = qh[0] >> bq8_offset; vh[1] = qh[4] >> bq8_offset; const uint16_t * scales = (const uint16_t *)bq5_K->scales; uint16_t aux[2]; const int j = bq8_offset/2; if (j < 2) { aux[0] = scales[j+0] & 0x3f3f; aux[1] = scales[j+2] & 0x3f3f; } else { aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 2); aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2); } const uint8_t * sc = (const uint8_t *)aux; const uint8_t * m = sc + 2; #pragma unroll for (int i = 0; i < QR5_K; ++i) { const block_q8_1 * bq8i = bq8_1 + bq8_offset + i; d8[i] = __low2float(bq8i->ds); const int * q8 = (const int *)bq8i->qs + ((iqs/2)%4); u[2*i+0] = q8[0]; u[2*i+1] = 
q8[4]; } return vec_dot_q5_K_q8_1_impl_vmmq(vl, vh, u, sc, m, bq5_K->dm, d8); #else const block_q5_K * bq5_K = (const block_q5_K *) vbq; const int8_t * s = bq5_K->scales; const float d = bq5_K->d; const float d8_1 = __low2half(bq8_1[0].ds); const float d8_2 = __low2half(bq8_1[1].ds); const int ui1 = *((const int *)bq8_1[0].qs + (iqs/2)); const int ui2 = *((const int *)bq8_1[0].qs + (iqs/2) + 4); const int ui3 = *((const int *)bq8_1[1].qs + (iqs/2)); const int ui4 = *((const int *)bq8_1[1].qs + (iqs/2) + 4); const int * ql = (const int *)bq5_K->qs + (iqs/2); const int vl1 = ql[0]; const int vl2 = ql[4]; const int step = 4 * (iqs/2); // 0, 4, 8, 12 const int im = step/8; // = 0 for iqs = 0, 2, = 1 for iqs = 4, 6 const int in = step%8; // 0, 4, 0, 4 const int vh = (*((const int *)(bq5_K->qh + in))) >> im; const int v1 = (((vh << 4) & 0x10101010) ^ 0x10101010) | ((vl1 >> 0) & 0x0f0f0f0f); const int v2 = (((vh << 2) & 0x10101010) ^ 0x10101010) | ((vl2 >> 0) & 0x0f0f0f0f); const int v3 = (((vh >> 0) & 0x10101010) ^ 0x10101010) | ((vl1 >> 4) & 0x0f0f0f0f); const int v4 = (((vh >> 2) & 0x10101010) ^ 0x10101010) | ((vl2 >> 4) & 0x0f0f0f0f); const float sumf_d = d8_1 * (__dp4a(ui1, v1, 0) * s[0] + __dp4a(ui2, v2, 0) * s[1]) + d8_2 * (__dp4a(ui3, v3, 0) * s[2] + __dp4a(ui4, v4, 0) * s[3]); return d * sumf_d; #endif } static __device__ __forceinline__ float vec_dot_q6_K_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { const block_q6_K * bq6_K = (const block_q6_K *) vbq; const int bq8_offset = 2 * QR6_K * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/4); const int scale_offset = (QI6_K/4) * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/8); const int vh_shift = 2 * ((iqs % (QI6_K/2)) / (QI6_K/4)); const int vl = get_int_from_uint8(bq6_K->ql, iqs); const int vh = get_int_from_uint8(bq6_K->qh, (QI6_K/4) * (iqs / (QI6_K/2)) + iqs % (QI6_K/4)) >> vh_shift; const int8_t * scales = bq6_K->scales + scale_offset; int u[QR6_K]; float d8[QR6_K]; #pragma unroll for (int i = 0; i < QR6_K; ++i) { u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + 2*i].qs, iqs % QI8_1); d8[i] = __low2float(bq8_1[bq8_offset + 2*i].ds); } return vec_dot_q6_K_q8_1_impl_mmvq(vl, vh, u, scales, bq6_K->d, d8); } // https://github.com/ggerganov/llama.cpp/blob/c50a82ce0f71558cbb8e555146ba124251504b38/ggml-cuda/mmvq.cu#L4 typedef float (*vec_dot_q_cuda_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs); template <int ncols_y, int qk, int qi, typename block_q_t, int vdr, vec_dot_q_cuda_t vec_dot_q_cuda> static __device__ void mul_mat_vec_q( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) && (defined(RDNA2) || defined(RDNA3)) constexpr int nwarps = 1; constexpr int rows_per_cuda_block = 1; #else constexpr int nwarps = ncols_y <= 4 ? 4 : 2; constexpr int rows_per_cuda_block = ncols_y == 1 ? 
1 : 2; #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) && !defined(RDNA2) && !defined(RDNA3) const int tid = WARP_SIZE*threadIdx.y + threadIdx.x; const int row0 = rows_per_cuda_block*blockIdx.x; const int blocks_per_row_x = ncols_x / qk; const int blocks_per_col_y = nrows_y / QK8_1; constexpr int blocks_per_iter = vdr * nwarps*WARP_SIZE / qi; // partial sum for each thread float tmp[ncols_y][rows_per_cuda_block] = {0.0f}; const block_q_t * x = (const block_q_t *) vx; const block_q8_1 * y = (const block_q8_1 *) vy; for (int kbx = tid / (qi/vdr); kbx < blocks_per_row_x; kbx += blocks_per_iter) { const int kby = kbx * (qk/QK8_1); // y block index that aligns with kbx // x block quant index when casting the quants to int const int kqs = vdr * (tid % (qi/vdr)); #pragma unroll for (int j = 0; j < ncols_y; ++j) { #pragma unroll for (int i = 0; i < rows_per_cuda_block; ++i) { tmp[j][i] += vec_dot_q_cuda( &x[kbx + (row0 + i)*blocks_per_row_x], &y[j*blocks_per_col_y + kby], kqs); } } } __shared__ float tmp_shared[nwarps-1 > 0 ? nwarps-1 : 1][ncols_y][rows_per_cuda_block][WARP_SIZE]; if (threadIdx.y > 0) { #pragma unroll for (int j = 0; j < ncols_y; ++j) { #pragma unroll for (int i = 0; i < rows_per_cuda_block; ++i) { tmp_shared[threadIdx.y-1][j][i][threadIdx.x] = tmp[j][i]; } } } __syncthreads(); if (threadIdx.y > 0) { return; } // sum up partial sums and write back result #pragma unroll for (int j = 0; j < ncols_y; ++j) { #pragma unroll for (int i = 0; i < rows_per_cuda_block; ++i) { #pragma unroll for (int l = 0; l < nwarps-1; ++l) { tmp[j][i] += tmp_shared[l][j][i][threadIdx.x]; } tmp[j][i] = warp_reduce_sum(tmp[j][i]); } if (threadIdx.x < rows_per_cuda_block) { dst[j*nrows_dst + row0 + threadIdx.x] = tmp[j][threadIdx.x]; } } } // batch size = 1 extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda1( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<1, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda1( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<1, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda1( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<1, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda1( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<1, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda1( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<1, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda1( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, 
const int nrows_dst) { mul_mat_vec_q<1, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda1( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<1, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda1( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<1, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda1( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<1, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda1( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<1, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } // batch size = 2 extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda2( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<2, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda2( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<2, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda2( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<2, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda2( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<2, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda2( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<2, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda2( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<2, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda2( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { 
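// Each *_cudaN wrapper fixes ncols_y (the batch size, i.e. the number of y columns) as a
// template argument, so mul_mat_vec_q can size its per-thread accumulator
// tmp[ncols_y][rows_per_cuda_block] at compile time.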
mul_mat_vec_q<2, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda2( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<2, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda2( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<2, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda2( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<2, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } // batch size = 3 extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda3( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<3, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda3( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<3, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda3( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<3, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda3( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<3, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda3( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<3, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda3( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<3, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda3( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<3, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda3( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<3, QK_K, 
QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda3( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<3, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda3( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<3, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } // batch size = 4 extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda4( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<4, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda4( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<4, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda4( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<4, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda4( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<4, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda4( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<4, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda4( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<4, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda4( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<4, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda4( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<4, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda4( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<4, QK_K, QI5_K, block_q5_K, 
VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda4( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<4, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } // batch size = 5 extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda5( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<5, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda5( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<5, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda5( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<5, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda5( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<5, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda5( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<5, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda5( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<5, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda5( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<5, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda5( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<5, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda5( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<5, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda5( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<5, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, 
vec_dot_q6_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } // batch size = 6 extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda6( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<6, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda6( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<6, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda6( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<6, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda6( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<6, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda6( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<6, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda6( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<6, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda6( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<6, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda6( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<6, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda6( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<6, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda6( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<6, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } // batch size = 7 extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda7( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<7, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, 
vec_dot_q4_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda7( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<7, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda7( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<7, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda7( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<7, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda7( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<7, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda7( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<7, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda7( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<7, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda7( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<7, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda7( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<7, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda7( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<7, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } // batch size = 8 extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda8( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<8, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda8( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<8, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1> 
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda8( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<8, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda8( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<8, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda8( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<8, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda8( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<8, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda8( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<8, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda8( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<8, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda8( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<8, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda8( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<8, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void quantize_q8_1(const float * __restrict__ x, void * __restrict__ vy, const int kx, const int kx_padded) { const int ix = blockDim.x*blockIdx.x + threadIdx.x; if (ix >= kx_padded) { return; } const int iy = blockDim.y*blockIdx.y + threadIdx.y; const int i_padded = iy*kx_padded + ix; block_q8_1 * y = (block_q8_1 *) vy; const int ib = i_padded / QK8_1; // block index const int iqs = i_padded % QK8_1; // quant index const float xi = ix < kx ? x[iy*kx + ix] : 0.0f; float amax = fabsf(xi); float sum = xi; amax = warp_reduce_max(amax); sum = warp_reduce_sum(sum); const float d = amax / 127; const int8_t q = amax == 0.0f ? 
0 : roundf(xi / d); y[ib].qs[iqs] = q; if (iqs > 0) { return; } reinterpret_cast<half&>(y[ib].ds.x) = d; reinterpret_cast<half&>(y[ib].ds.y) = sum; } // Kernels from https://github.com/ggerganov/llama.cpp/blob/master/ggml-cuda/mmq.cu template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI5_0) + mmq_y/QI5_0]; *x_ql = tile_x_ql; *x_dm = (half2 *) tile_x_d; } template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_0( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); GGML_CUDA_ASSUME(i_offset >= 0); GGML_CUDA_ASSUME(i_offset < nwarps); GGML_CUDA_ASSUME(k >= 0); GGML_CUDA_ASSUME(k < WARP_SIZE); const int kbx = k / QI5_0; const int kqsx = k % QI5_0; const block_q5_0 * bx0 = (const block_q5_0 *) vx; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { int i = i0 + i_offset; if (need_check) { i = min(i, i_max); } const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbx; const int ql = get_int_from_uint8(bxi->qs, kqsx); const int qh = get_int_from_uint8(bxi->qh, 0) >> (4 * (k % QI5_0)); int qs0 = (ql >> 0) & 0x0F0F0F0F; qs0 |= (qh << 4) & 0x00000010; // 0 -> 4 qs0 |= (qh << 11) & 0x00001000; // 1 -> 12 qs0 |= (qh << 18) & 0x00100000; // 2 -> 20 qs0 |= (qh << 25) & 0x10000000; // 3 -> 28 qs0 = __vsubss4(qs0, 0x10101010); // subtract 16 x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0; int qs1 = (ql >> 4) & 0x0F0F0F0F; qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4 qs1 |= (qh >> 5) & 0x00001000; // 17 -> 12 qs1 |= (qh << 2) & 0x00100000; // 18 -> 20 qs1 |= (qh << 9) & 0x10000000; // 19 -> 28 qs1 = __vsubss4(qs1, 0x10101010); // subtract 16 x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1; } const int blocks_per_tile_x_row = WARP_SIZE / QI5_0; const int kbxd = k % blocks_per_tile_x_row; float * x_dmf = (float *) x_dm; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_0) { int i = i0 + i_offset * QI5_0 + k / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); } const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbxd; x_dmf[i * (WARP_SIZE/QI5_0) + i / QI5_0 + kbxd] = bxi->d; } } static __device__ __forceinline__ float vec_dot_q5_0_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2)); const int index_bx = i * (WARP_SIZE/QI5_0) + i/QI5_0 + k/QI5_0; const float * x_dmf = (const float *) x_dm; const float * y_df = (const float *) y_ds; int u[2*VDR_Q5_0_Q8_1_MMQ]; #pragma unroll for (int l = 0; l < VDR_Q5_0_Q8_1_MMQ; ++l) { u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_0) % WARP_SIZE]; } return vec_dot_q8_0_q8_1_impl<QR5_0*VDR_Q5_0_Q8_1_MMQ> (&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dmf[index_bx], y_df[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); } template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** 
x_sc) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI5_1) + mmq_y/QI5_1]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; } template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_1( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); GGML_CUDA_ASSUME(i_offset >= 0); GGML_CUDA_ASSUME(i_offset < nwarps); GGML_CUDA_ASSUME(k >= 0); GGML_CUDA_ASSUME(k < WARP_SIZE); const int kbx = k / QI5_1; const int kqsx = k % QI5_1; const block_q5_1 * bx0 = (const block_q5_1 *) vx; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { int i = i0 + i_offset; if (need_check) { i = min(i, i_max); } const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbx; const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx); const int qh = get_int_from_uint8_aligned(bxi->qh, 0) >> (4 * (k % QI5_1)); int qs0 = (ql >> 0) & 0x0F0F0F0F; qs0 |= (qh << 4) & 0x00000010; // 0 -> 4 qs0 |= (qh << 11) & 0x00001000; // 1 -> 12 qs0 |= (qh << 18) & 0x00100000; // 2 -> 20 qs0 |= (qh << 25) & 0x10000000; // 3 -> 28 x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0; int qs1 = (ql >> 4) & 0x0F0F0F0F; qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4 qs1 |= (qh >> 5) & 0x00001000; // 17 -> 12 qs1 |= (qh << 2) & 0x00100000; // 18 -> 20 qs1 |= (qh << 9) & 0x10000000; // 19 -> 28 x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1; } const int blocks_per_tile_x_row = WARP_SIZE / QI5_1; const int kbxd = k % blocks_per_tile_x_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_1) { int i = i0 + i_offset * QI5_1 + k / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); } const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbxd; x_dm[i * (WARP_SIZE/QI5_1) + i / QI5_1 + kbxd] = bxi->dm; } } static __device__ __forceinline__ float vec_dot_q5_1_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2)); const int index_bx = i * (WARP_SIZE/QI5_1) + + i/QI5_1 + k/QI5_1; int u[2*VDR_Q5_1_Q8_1_MMQ]; #pragma unroll for (int l = 0; l < VDR_Q5_1_Q8_1_MMQ; ++l) { u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_1) % WARP_SIZE]; } return vec_dot_q8_1_q8_1_impl<QR5_1*VDR_Q5_1_Q8_1_MMQ> (&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dm[index_bx], y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); } template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q8_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y]; __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI8_0) + mmq_y/QI8_0]; *x_ql = tile_x_qs; *x_dm = (half2 *) tile_x_d; } template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q8_0( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { GGML_UNUSED(x_qh); 
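// q8_0 blocks carry only 8-bit quants and a single fp16 scale, so this loader copies one int
// (four int8 quants) per row and lane into the shared tile x_ql and stores the block scale,
// converted to float, in x_dm; the x_qh and x_sc tiles used by other formats stay unused.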
GGML_UNUSED(x_sc); GGML_CUDA_ASSUME(i_offset >= 0); GGML_CUDA_ASSUME(i_offset < nwarps); GGML_CUDA_ASSUME(k >= 0); GGML_CUDA_ASSUME(k < WARP_SIZE); const int kbx = k / QI8_0; const int kqsx = k % QI8_0; float * x_dmf = (float *) x_dm; const block_q8_0 * bx0 = (const block_q8_0 *) vx; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { int i = i0 + i_offset; if (need_check) { i = min(i, i_max); } const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbx; x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_int8(bxi->qs, kqsx); } const int blocks_per_tile_x_row = WARP_SIZE / QI8_0; const int kbxd = k % blocks_per_tile_x_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI8_0) { int i = i0 + i_offset * QI8_0 + k / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); } const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbxd; x_dmf[i * (WARP_SIZE/QI8_0) + i / QI8_0 + kbxd] = bxi->d; } } static __device__ __forceinline__ float vec_dot_q8_0_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); const float * x_dmf = (const float *) x_dm; const float * y_df = (const float *) y_ds; return vec_dot_q8_0_q8_1_impl<VDR_Q8_0_Q8_1_MMQ> (&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[j * WARP_SIZE + k], x_dmf[i * (WARP_SIZE/QI8_0) + i/QI8_0 + k/QI8_0], y_df[j * (WARP_SIZE/QI8_1) + k/QI8_1]); } template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q2_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { GGML_UNUSED(x_qh); __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y]; __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI2_K) + mmq_y/QI2_K]; __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; *x_sc = tile_x_sc; } template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q2_K( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { GGML_UNUSED(x_qh); GGML_CUDA_ASSUME(i_offset >= 0); GGML_CUDA_ASSUME(i_offset < nwarps); GGML_CUDA_ASSUME(k >= 0); GGML_CUDA_ASSUME(k < WARP_SIZE); const int kbx = k / QI2_K; const int kqsx = k % QI2_K; const block_q2_K * bx0 = (const block_q2_K *) vx; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { int i = i0 + i_offset; if (need_check) { i = min(i, i_max); } const block_q2_K * bxi = bx0 + i*blocks_per_row + kbx; x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); } const int blocks_per_tile_x_row = WARP_SIZE / QI2_K; const int kbxd = k % blocks_per_tile_x_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI2_K) { int i = (i0 + i_offset * QI2_K + k / blocks_per_tile_x_row) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q2_K * bxi = bx0 + i*blocks_per_row + kbxd; x_dm[i * (WARP_SIZE/QI2_K) + i / QI2_K + kbxd] = bxi->dm; } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) { int i = i0 + i_offset * 4 + k / (WARP_SIZE/4); if (need_check) { i = min(i, i_max); } const block_q2_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI2_K/4); x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = get_int_from_uint8_aligned(bxi->scales, k % (QI2_K/4)); } } static __device__ __forceinline__ float 
vec_dot_q2_K_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { GGML_UNUSED(x_qh); const int kbx = k / QI2_K; const int ky = (k % QI2_K) * QR2_K; const float * y_df = (const float *) y_ds; int v[QR2_K*VDR_Q2_K_Q8_1_MMQ]; const int kqsx = i * (WARP_SIZE + 1) + kbx*QI2_K + (QI2_K/2) * (ky/(2*QI2_K)) + ky % (QI2_K/2); const int shift = 2 * ((ky % (2*QI2_K)) / (QI2_K/2)); #pragma unroll for (int l = 0; l < QR2_K*VDR_Q2_K_Q8_1_MMQ; ++l) { v[l] = (x_ql[kqsx + l] >> shift) & 0x03030303; } const uint8_t * scales = ((const uint8_t *) &x_sc[i * (WARP_SIZE/4) + i/4 + kbx*4]) + ky/4; const int index_y = j * WARP_SIZE + (QR2_K*k) % WARP_SIZE; return vec_dot_q2_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dm[i * (WARP_SIZE/QI2_K) + i/QI2_K + kbx], y_df[index_y/QI8_1]); } template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q3_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y]; __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI3_K) + mmq_y/QI3_K]; __shared__ int tile_x_qh[mmq_y * (WARP_SIZE/2) + mmq_y/2]; __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; *x_qh = tile_x_qh; *x_sc = tile_x_sc; } template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q3_K( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { GGML_CUDA_ASSUME(i_offset >= 0); GGML_CUDA_ASSUME(i_offset < nwarps); GGML_CUDA_ASSUME(k >= 0); GGML_CUDA_ASSUME(k < WARP_SIZE); const int kbx = k / QI3_K; const int kqsx = k % QI3_K; const block_q3_K * bx0 = (const block_q3_K *) vx; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { int i = i0 + i_offset; if (need_check) { i = min(i, i_max); } const block_q3_K * bxi = bx0 + i*blocks_per_row + kbx; x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx); } const int blocks_per_tile_x_row = WARP_SIZE / QI3_K; const int kbxd = k % blocks_per_tile_x_row; float * x_dmf = (float *) x_dm; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI3_K) { int i = (i0 + i_offset * QI3_K + k / blocks_per_tile_x_row) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q3_K * bxi = bx0 + i*blocks_per_row + kbxd; x_dmf[i * (WARP_SIZE/QI3_K) + i / QI3_K + kbxd] = bxi->d; } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 2) { int i = i0 + i_offset * 2 + k / (WARP_SIZE/2); if (need_check) { i = min(i, i_max); } const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/2)) / (QI3_K/2); // invert the mask with ~ so that a 0/1 results in 4/0 being subtracted x_qh[i * (WARP_SIZE/2) + i / 2 + k % (WARP_SIZE/2)] = ~get_int_from_uint8(bxi->hmask, k % (QI3_K/2)); } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) { int i = i0 + i_offset * 4 + k / (WARP_SIZE/4); if (need_check) { i = min(i, i_max); } const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI3_K/4); const int ksc = k % (QI3_K/4); const int ksc_low = ksc % (QI3_K/8); const int shift_low = 4 * (ksc / (QI3_K/8)); const int sc_low = (get_int_from_uint8(bxi->scales, ksc_low) >> shift_low) & 0x0F0F0F0F; const int ksc_high = QI3_K/8; const int shift_high = 2 * ksc; 
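// Merge the low 4 bits of each q3_K scale (first 8 scale bytes) with its high 2 bits
// (packed into the last 4 bytes), then subtract 32 below to re-center the 6-bit scales
// as signed values.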
const int sc_high = ((get_int_from_uint8(bxi->scales, ksc_high) >> shift_high) << 4) & 0x30303030; const int sc = __vsubss4(sc_low | sc_high, 0x20202020); x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = sc; } } static __device__ __forceinline__ float vec_dot_q3_K_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { const int kbx = k / QI3_K; const int ky = (k % QI3_K) * QR3_K; const float * x_dmf = (const float *) x_dm; const float * y_df = (const float *) y_ds; const int8_t * scales = ((const int8_t *) (x_sc + i * (WARP_SIZE/4) + i/4 + kbx*4)) + ky/4; int v[QR3_K*VDR_Q3_K_Q8_1_MMQ]; #pragma unroll for (int l = 0; l < QR3_K*VDR_Q3_K_Q8_1_MMQ; ++l) { const int kqsx = i * (WARP_SIZE + 1) + kbx*QI3_K + (QI3_K/2) * (ky/(2*QI3_K)) + ky % (QI3_K/2); const int shift = 2 * ((ky % 32) / 8); const int vll = (x_ql[kqsx + l] >> shift) & 0x03030303; const int vh = x_qh[i * (WARP_SIZE/2) + i/2 + kbx * (QI3_K/2) + (ky+l)%8] >> ((ky+l) / 8); const int vlh = (vh << 2) & 0x04040404; v[l] = __vsubss4(vll, vlh); } const int index_y = j * WARP_SIZE + (k*QR3_K) % WARP_SIZE; return vec_dot_q3_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dmf[i * (WARP_SIZE/QI3_K) + i/QI3_K + kbx], y_df[index_y/QI8_1]); } template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { GGML_UNUSED(x_qh); __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y]; __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI4_K) + mmq_y/QI4_K]; __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; *x_sc = tile_x_sc; } template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_K( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { GGML_UNUSED(x_qh); GGML_CUDA_ASSUME(i_offset >= 0); GGML_CUDA_ASSUME(i_offset < nwarps); GGML_CUDA_ASSUME(k >= 0); GGML_CUDA_ASSUME(k < WARP_SIZE); const int kbx = k / QI4_K; // == 0 if QK_K == 256 const int kqsx = k % QI4_K; // == k if QK_K == 256 const block_q4_K * bx0 = (const block_q4_K *) vx; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { int i = i0 + i_offset; if (need_check) { i = min(i, i_max); } const block_q4_K * bxi = bx0 + i*blocks_per_row + kbx; x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); } const int blocks_per_tile_x_row = WARP_SIZE / QI4_K; // == 1 if QK_K == 256 const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256 #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_K) { int i = (i0 + i_offset * QI4_K + k / blocks_per_tile_x_row) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q4_K * bxi = bx0 + i*blocks_per_row + kbxd; #if QK_K == 256 x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = bxi->dm; #else x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = {bxi->dm[0], bxi->dm[1]}; #endif } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) { int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q4_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI4_K/8); const int * scales = (const int *) bxi->scales; const int ksc = k % (WARP_SIZE/8); // scale 
arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m8 int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8; } } static __device__ __forceinline__ float vec_dot_q4_K_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { GGML_UNUSED(x_qh); const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2*((k % 16) / 8); const int index_y = j * WARP_SIZE + (QR4_K*k) % WARP_SIZE; return vec_dot_q4_K_q8_1_impl_mmq(&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[index_y], sc, sc+8, x_dm[i * (WARP_SIZE/QI4_K) + i/QI4_K], &y_ds[index_y/QI8_1]); } template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { GGML_UNUSED(x_qh); __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI5_K) + mmq_y/QI5_K]; __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; *x_sc = tile_x_sc; } template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_K( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { GGML_UNUSED(x_qh); GGML_CUDA_ASSUME(i_offset >= 0); GGML_CUDA_ASSUME(i_offset < nwarps); GGML_CUDA_ASSUME(k >= 0); GGML_CUDA_ASSUME(k < WARP_SIZE); const int kbx = k / QI5_K; // == 0 if QK_K == 256 const int kqsx = k % QI5_K; // == k if QK_K == 256 const block_q5_K * bx0 = (const block_q5_K *) vx; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { int i = i0 + i_offset; if (need_check) { i = min(i, i_max); } const block_q5_K * bxi = bx0 + i*blocks_per_row + kbx; const int ky = QR5_K*kqsx; const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx); const int ql0 = (ql >> 0) & 0x0F0F0F0F; const int ql1 = (ql >> 4) & 0x0F0F0F0F; const int qh = get_int_from_uint8_aligned(bxi->qh, kqsx % (QI5_K/4)); const int qh0 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 0)) << 4) & 0x10101010; const int qh1 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 1)) << 4) & 0x10101010; const int kq0 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + 0; const int kq1 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + (QI5_K/4); x_ql[i * (2*WARP_SIZE + 1) + kq0] = ql0 | qh0; x_ql[i * (2*WARP_SIZE + 1) + kq1] = ql1 | qh1; } const int blocks_per_tile_x_row = WARP_SIZE / QI5_K; // == 1 if QK_K == 256 const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256 #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_K) { int i = (i0 + i_offset * QI5_K + k / blocks_per_tile_x_row) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q5_K * bxi = bx0 + i*blocks_per_row + kbxd; #if QK_K == 256 x_dm[i * (WARP_SIZE/QI5_K) + i / QI5_K + kbxd] = bxi->dm; #endif } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) { int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q5_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI5_K/8); const int * scales = (const int *) bxi->scales; const int ksc = k % (WARP_SIZE/8); // scale 
arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m8 int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8; } } static __device__ __forceinline__ float vec_dot_q5_K_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { GGML_UNUSED(x_qh); const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2 * ((k % 16) / 8); const int index_x = i * (QR5_K*WARP_SIZE + 1) + QR5_K*k; const int index_y = j * WARP_SIZE + (QR5_K*k) % WARP_SIZE; return vec_dot_q5_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, sc+8, x_dm[i * (WARP_SIZE/QI5_K) + i/QI5_K], &y_ds[index_y/QI8_1]); } template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q6_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { GGML_UNUSED(x_qh); __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI6_K) + mmq_y/QI6_K]; __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; *x_sc = tile_x_sc; } template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q6_K( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { GGML_UNUSED(x_qh); GGML_CUDA_ASSUME(i_offset >= 0); GGML_CUDA_ASSUME(i_offset < nwarps); GGML_CUDA_ASSUME(k >= 0); GGML_CUDA_ASSUME(k < WARP_SIZE); const int kbx = k / QI6_K; // == 0 if QK_K == 256 const int kqsx = k % QI6_K; // == k if QK_K == 256 const block_q6_K * bx0 = (const block_q6_K *) vx; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { int i = i0 + i_offset; if (need_check) { i = min(i, i_max); } const block_q6_K * bxi = bx0 + i*blocks_per_row + kbx; const int ky = QR6_K*kqsx; const int ql = get_int_from_uint8(bxi->ql, kqsx); const int ql0 = (ql >> 0) & 0x0F0F0F0F; const int ql1 = (ql >> 4) & 0x0F0F0F0F; const int qh = get_int_from_uint8(bxi->qh, (QI6_K/4) * (kqsx / (QI6_K/2)) + kqsx % (QI6_K/4)); const int qh0 = ((qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) << 4) & 0x30303030; const int qh1 = (qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) & 0x30303030; const int kq0 = ky - ky % QI6_K + k % (QI6_K/2) + 0; const int kq1 = ky - ky % QI6_K + k % (QI6_K/2) + (QI6_K/2); x_ql[i * (2*WARP_SIZE + 1) + kq0] = __vsubss4(ql0 | qh0, 0x20202020); x_ql[i * (2*WARP_SIZE + 1) + kq1] = __vsubss4(ql1 | qh1, 0x20202020); } const int blocks_per_tile_x_row = WARP_SIZE / QI6_K; // == 1 if QK_K == 256 const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256 float * x_dmf = (float *) x_dm; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI6_K) { int i = (i0 + i_offset * QI6_K + k / blocks_per_tile_x_row) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q6_K * bxi = bx0 + i*blocks_per_row + kbxd; x_dmf[i * (WARP_SIZE/QI6_K) + i / QI6_K + kbxd] = bxi->d; } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) { int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q6_K * bxi = bx0 + i*blocks_per_row + (k % 
(WARP_SIZE/8)) / 4; x_sc[i * (WARP_SIZE/8) + i / 8 + k % (WARP_SIZE/8)] = get_int_from_int8(bxi->scales, k % (QI6_K/8)); } } static __device__ __forceinline__ float vec_dot_q6_K_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { GGML_UNUSED(x_qh); const float * x_dmf = (const float *) x_dm; const float * y_df = (const float *) y_ds; const int8_t * sc = ((const int8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/8]); const int index_x = i * (QR6_K*WARP_SIZE + 1) + QR6_K*k; const int index_y = j * WARP_SIZE + (QR6_K*k) % WARP_SIZE; return vec_dot_q6_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, x_dmf[i * (WARP_SIZE/QI6_K) + i/QI6_K], &y_df[index_y/QI8_1]); } static __device__ __forceinline__ float vec_dot_q4_0_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2)); const float * x_dmf = (const float *) x_dm; int u[2*VDR_Q4_0_Q8_1_MMQ]; #pragma unroll for (int l = 0; l < VDR_Q4_0_Q8_1_MMQ; ++l) { u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_0) % WARP_SIZE]; } return vec_dot_q4_0_q8_1_impl<VDR_Q4_0_Q8_1_MMQ> (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dmf[i * (WARP_SIZE/QI4_0) + i/QI4_0 + k/QI4_0], y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); } static __device__ __forceinline__ float vec_dot_q4_1_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2)); int u[2*VDR_Q4_1_Q8_1_MMQ]; #pragma unroll for (int l = 0; l < VDR_Q4_1_Q8_1_MMQ; ++l) { u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_1) % WARP_SIZE]; } return vec_dot_q4_1_q8_1_impl<VDR_Q4_1_Q8_1_MMQ> (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dm[i * (WARP_SIZE/QI4_1) + i/QI4_1 + k/QI4_1], y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); } extern "C" __global__ void mul_mat_q4_0( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q4_0_AMPERE; const int mmq_y = MMQ_Y_Q4_0_AMPERE; const int nwarps = NWARPS_Q4_0_AMPERE; mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>, load_tiles_q4_0<mmq_y, nwarps, true>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_q4_1( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q4_1_AMPERE; const int mmq_y = MMQ_Y_Q4_1_AMPERE; const int nwarps = NWARPS_Q4_1_AMPERE; mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps, allocate_tiles_q4_1<mmq_y>, load_tiles_q4_1<mmq_y, nwarps, 
true>, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_q5_0( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q5_0_AMPERE; const int mmq_y = MMQ_Y_Q5_0_AMPERE; const int nwarps = NWARPS_Q5_0_AMPERE; mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps, allocate_tiles_q5_0<mmq_y>, load_tiles_q5_0<mmq_y, nwarps, true>, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_q5_1( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q5_1_AMPERE; const int mmq_y = MMQ_Y_Q5_1_AMPERE; const int nwarps = NWARPS_Q5_1_AMPERE; mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps, allocate_tiles_q5_1<mmq_y>, load_tiles_q5_1<mmq_y, nwarps, true>, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_q8_0( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q8_0_AMPERE; const int mmq_y = MMQ_Y_Q8_0_AMPERE; const int nwarps = NWARPS_Q8_0_AMPERE; mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps, allocate_tiles_q8_0<mmq_y>, load_tiles_q8_0<mmq_y, nwarps, true>, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_q2_K( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q2_K_AMPERE; const int mmq_y = MMQ_Y_Q2_K_AMPERE; const int nwarps = NWARPS_Q2_K_AMPERE; mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps, allocate_tiles_q2_K<mmq_y>, load_tiles_q2_K<mmq_y, nwarps, true>, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_q3_K( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q3_K_AMPERE; const int mmq_y = MMQ_Y_Q3_K_AMPERE; const int nwarps = NWARPS_Q3_K_AMPERE; mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps, allocate_tiles_q3_K<mmq_y>, load_tiles_q3_K<mmq_y, nwarps, true>, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_q4_K( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q4_K_AMPERE; const int mmq_y = MMQ_Y_Q4_K_AMPERE; const int nwarps = NWARPS_Q4_K_AMPERE; mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps, allocate_tiles_q4_K<mmq_y>, load_tiles_q4_K<mmq_y, nwarps, true>, VDR_Q4_K_Q8_1_MMQ, 
vec_dot_q4_K_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_q5_K( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q5_K_AMPERE; const int mmq_y = MMQ_Y_Q5_K_AMPERE; const int nwarps = NWARPS_Q5_K_AMPERE; mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps, allocate_tiles_q5_K<mmq_y>, load_tiles_q5_K<mmq_y, nwarps, true>, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_q6_K( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q6_K_AMPERE; const int mmq_y = MMQ_Y_Q6_K_AMPERE; const int nwarps = NWARPS_Q6_K_AMPERE; mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps, allocate_tiles_q6_K<mmq_y>, load_tiles_q6_K<mmq_y, nwarps, true>, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); }
candle/candle-kernels/src/quantized.cu/0
{ "file_path": "candle/candle-kernels/src/quantized.cu", "repo_id": "candle", "token_count": 85333 }
33
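The extern "C" mul_mat_q* entry points above only fix the tile constants (mmq_x, mmq_y, nwarps) per quantization type; the launch geometry is left to the host. Below is a minimal Rust sketch of a plausible launch-dimension calculation. The ceil-divided grid over (mmq_y, mmq_x) output tiles and the (WARP_SIZE, nwarps) block shape are assumptions carried over from the upstream llama.cpp MMQ kernels that this file is adapted from, not something defined here.

```rust
// Hypothetical helper: derives a plausible launch geometry for the mul_mat_q* kernels.
// The ceil-divisions and block shape are an assumption based on the upstream kernels,
// not read out of this file.
fn mmq_launch_dims(
    nrows_x: usize, // rows of the quantized weight matrix
    ncols_y: usize, // columns of the q8_1 activation matrix
    mmq_x: usize,   // tile width  (e.g. MMQ_X_Q4_0_AMPERE)
    mmq_y: usize,   // tile height (e.g. MMQ_Y_Q4_0_AMPERE)
    nwarps: usize,  // e.g. NWARPS_Q4_0_AMPERE
) -> ((usize, usize, usize), (usize, usize, usize)) {
    const WARP_SIZE: usize = 32;
    let grid = ((nrows_x + mmq_y - 1) / mmq_y, (ncols_y + mmq_x - 1) / mmq_x, 1);
    let block = (WARP_SIZE, nwarps, 1);
    (grid, block)
}

fn main() {
    // One thread block per (mmq_y x mmq_x) output tile; the sizes here are arbitrary.
    let (grid, block) = mmq_launch_dims(4096, 512, 64, 128, 4);
    println!("grid = {grid:?}, block = {block:?}");
}
```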
#include <metal_stdlib> using namespace metal; #define MAX(x, y) ((x) > (y) ? (x) : (y)) #define MIN(x, y) ((x) < (y) ? (x) : (y)) METAL_FUNC uint get_strided_index( uint idx, constant size_t &num_dims, constant size_t *dims, constant size_t *strides ) { uint strided_i = 0; for (uint d = 0; d < num_dims; d++) { uint dim_idx = num_dims - 1 - d; strided_i += (idx % dims[dim_idx]) * strides[dim_idx]; idx /= dims[dim_idx]; } return strided_i; } constant int THREADGROUP_SIZE = 2048; template<typename T> METAL_FUNC void argmin( constant size_t &num_dims, constant size_t *dims, constant size_t *strides, constant size_t &el_to_sum_per_block, device const T *src, device uint *dst, uint id, uint tid, uint dst_id, uint block_dim, threadgroup T *shared_memory, threadgroup uint *shared_indices ) { bool notset = true; // Elements summed in this block range from dst_id * el_to_sum_per_block // to (dst_id + 1) * el_to_sum_per_block. size_t start_idx = dst_id * el_to_sum_per_block; size_t stop_idx = start_idx + el_to_sum_per_block; size_t idx = start_idx + tid; while (idx < stop_idx) { // TODO: Fast version for the contiguous case. size_t strided_i = get_strided_index(idx, num_dims, dims, strides); if (notset || src[strided_i] < shared_memory[tid]) { shared_memory[tid] = src[strided_i]; /* Assume that the reduction takes place over the last dimension which is contiguous. */ shared_indices[tid] = idx % dims[num_dims - 1]; notset = false; } idx += block_dim; } threadgroup_barrier(mem_flags::mem_none); // reduction in shared memory for (uint s = block_dim / 2; s > 0; s >>= 1) { if (tid < s && shared_memory[tid + s] < shared_memory[tid]) { shared_indices[tid] = shared_indices[tid + s]; shared_memory[tid] = shared_memory[tid + s]; } \ threadgroup_barrier(mem_flags::mem_none); } if (tid == 0) { dst[dst_id] = shared_indices[0]; } } #define ARGMIN(NAME, T, MAXVALUE) \ kernel void NAME( \ constant size_t &num_dims, \ constant size_t *dims, \ constant size_t *strides, \ constant size_t &el_to_sum_per_block, \ device const T *src, \ device uint *dst, \ uint id [[ thread_position_in_grid ]], \ uint tid [[ thread_index_in_threadgroup ]], \ uint dst_id [[ threadgroup_position_in_grid ]], \ uint block_dim [[ threads_per_threadgroup ]] \ ) { \ threadgroup T shared_memory[THREADGROUP_SIZE]; \ threadgroup uint shared_indices[THREADGROUP_SIZE]; \ shared_memory[tid] = MAXVALUE; \ shared_indices[tid] = 0xFFFFFFFF; \ argmin<T>(num_dims, dims, strides, el_to_sum_per_block, src, dst, id, tid, dst_id, block_dim, shared_memory, shared_indices); \ } \ template<typename T> METAL_FUNC void argmax( constant size_t & num_dims, constant size_t * dims, constant size_t * strides, constant size_t & el_to_sum_per_block, device const T * src, device uint * dst, uint id, uint tid, uint dst_id, uint block_dim, threadgroup T * shared_memory, threadgroup uint * shared_indices ) { // Elements summed in this block range from dst_id * el_to_sum_per_block // to (dst_id + 1) * el_to_sum_per_block. size_t start_idx = dst_id * el_to_sum_per_block; size_t stop_idx = start_idx + el_to_sum_per_block; size_t idx = start_idx + tid; bool notset = true; while (idx < stop_idx) { // TODO: Fast version for the contiguous case. 
size_t strided_i = get_strided_index(idx, num_dims, dims, strides); if (notset || shared_memory[tid] < src[strided_i]) { shared_memory[tid] = src[strided_i]; shared_indices[tid] = idx % dims[num_dims - 1]; notset = false; } idx += block_dim; } threadgroup_barrier(mem_flags::mem_none); // reduction in shared memory for (uint s = block_dim / 2; s > 0; s >>= 1) { if (tid < s && shared_memory[tid + s] > shared_memory[tid]) { shared_indices[tid] = shared_indices[tid + s]; shared_memory[tid] = shared_memory[tid + s]; } threadgroup_barrier(mem_flags::mem_none); } // Thread 0 writes the result of the reduction if (tid == 0) { dst[dst_id] = shared_indices[0]; } } #define ARGMAX(NAME, T, MINVALUE) \ kernel void NAME( \ constant size_t &num_dims, \ constant size_t *dims, \ constant size_t *strides, \ constant size_t &el_to_sum_per_block, \ device const T *src, \ device uint *dst, \ uint id [[ thread_position_in_grid ]], \ uint tid [[ thread_index_in_threadgroup ]], \ uint dst_id [[ threadgroup_position_in_grid ]], \ uint block_dim [[ threads_per_threadgroup ]] \ ) { \ threadgroup T shared_memory[THREADGROUP_SIZE]; \ threadgroup uint shared_indices[THREADGROUP_SIZE]; \ shared_memory[tid] = MINVALUE; \ shared_indices[tid] = 0xFFFFFFFF; \ argmax<T>(num_dims, dims, strides, el_to_sum_per_block, src, dst, id, tid, dst_id, block_dim, shared_memory, shared_indices); \ } \ template<typename T> METAL_FUNC void reduce( constant size_t & num_dims, constant size_t * dims, constant size_t * strides, constant size_t & el_to_sum_per_block, device const T * src, device T * dst, uint id, uint tid, uint dst_id, uint block_dim, threadgroup T * shared_memory, T (*fn)(T, T) ) { // Elements summed in this block range from dst_id * el_to_sum_per_block // to (dst_id + 1) * el_to_sum_per_block. size_t start_idx = dst_id * el_to_sum_per_block; size_t stop_idx = start_idx + el_to_sum_per_block; size_t idx = start_idx + tid; while (idx < stop_idx) { // TODO: Fast version for the contiguous case. 
size_t strided_i = get_strided_index(idx, num_dims, dims, strides); T x = shared_memory[tid]; T y = src[strided_i]; shared_memory[tid] = fn(x, y); idx += block_dim; } threadgroup_barrier(mem_flags::mem_none); // reduction in shared memory for (uint s = block_dim / 2; s > 0; s >>= 1) { if (tid < s) { T x = shared_memory[tid]; T y = shared_memory[tid + s]; shared_memory[tid] = fn(x, y); } threadgroup_barrier(mem_flags::mem_none); } if (tid == 0) { dst[dst_id] = shared_memory[0]; } } #define REDUCE(FN, NAME, T, START) \ METAL_FUNC T NAME##_##op(T x, T y) { return FN; } \ kernel void NAME( \ constant size_t &num_dims, \ constant size_t *dims, \ constant size_t *strides, \ constant size_t &el_to_sum_per_block, \ device const T *src, \ device T *dst, \ uint id [[ thread_position_in_grid ]], \ uint tid [[ thread_index_in_threadgroup ]], \ uint dst_id [[ threadgroup_position_in_grid ]], \ uint block_dim [[ threads_per_threadgroup ]] \ ) { \ threadgroup T shared_memory[THREADGROUP_SIZE]; \ shared_memory[tid] = START; \ reduce<T>(num_dims, dims, strides, el_to_sum_per_block, src, dst, id, tid, dst_id, block_dim, shared_memory, NAME##_##op); \ } \ template<typename T> METAL_FUNC void softmax( constant size_t & src_numel, constant size_t & el_to_sum_per_block, device const T * src, device T * dst, uint id, uint tid, uint dst_id, uint block_dim, threadgroup float * shared_memory ) { size_t start_idx = dst_id * el_to_sum_per_block; size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel); size_t idx = start_idx + tid; float tmp = -INFINITY; while (idx < stop_idx) { tmp = MAX(tmp, float(src[idx])); idx += block_dim; } shared_memory[tid] = tmp; threadgroup_barrier(mem_flags::mem_threadgroup); for (uint s = block_dim / 2; s > 0; s >>= 1) { if (tid < s) { shared_memory[tid] = MAX(shared_memory[tid], shared_memory[tid + s]);\ } threadgroup_barrier(mem_flags::mem_threadgroup); } /* wait for shared_memory[0] to be filled */ threadgroup_barrier(mem_flags::mem_threadgroup); float _max = shared_memory[0]; /* prevent tid=0 from overwriting _max before other threads have written it */ threadgroup_barrier(mem_flags::mem_threadgroup); shared_memory[tid] = 0; idx = start_idx + tid; while (idx < stop_idx) { const float val = exp(float(src[idx]) - _max); dst[idx] = T(val); shared_memory[tid] += val; idx += block_dim; } threadgroup_barrier(mem_flags::mem_threadgroup); for (uint s = block_dim / 2; s > 0; s >>= 1) { if (tid < s) { shared_memory[tid] += shared_memory[tid + s]; } threadgroup_barrier(mem_flags::mem_threadgroup); } const T inv_acc = T(1.0 / shared_memory[0]); idx = start_idx + tid; while (idx < stop_idx) { dst[idx] *= inv_acc; idx += block_dim; } } #define SOFTMAX(NAME, T) \ kernel void NAME( \ constant size_t &src_numel, \ constant size_t &el_to_sum_per_block, \ device const T *src, \ device T *dst, \ uint id [[ thread_position_in_grid ]], \ uint tid [[ thread_index_in_threadgroup ]], \ uint dst_id [[ threadgroup_position_in_grid ]], \ uint block_dim [[ threads_per_threadgroup ]] \ ) { \ threadgroup float shared_memory[THREADGROUP_SIZE]; \ shared_memory[tid] = -INFINITY; \ softmax<T>(src_numel, el_to_sum_per_block, src, dst, id, tid, dst_id, block_dim, shared_memory); \ } \ template<typename T> METAL_FUNC void rmsnorm( constant size_t & src_numel, constant size_t & el_to_sum_per_block, device const T * src, device T * dst, device const T * alpha, constant float & eps, uint id, uint tid, uint dst_id, uint block_dim, threadgroup float * shared_memory ) { size_t start_idx = dst_id * el_to_sum_per_block; 
size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel); size_t idx = start_idx + tid; float tmp = 0; while (idx < stop_idx) { tmp = tmp + float(src[idx]) * float(src[idx]); idx += block_dim; } shared_memory[tid] = tmp; threadgroup_barrier(mem_flags::mem_threadgroup); for (uint s = block_dim / 2; s > 0; s >>= 1) { if (tid < s) { shared_memory[tid] = shared_memory[tid] + shared_memory[tid + s]; } threadgroup_barrier(mem_flags::mem_threadgroup); } /* wait for shared_memory[0] to be filled */ threadgroup_barrier(mem_flags::mem_threadgroup); float norm = sqrt(shared_memory[0] / float(el_to_sum_per_block) + eps); float inv_norm = 1.0f / norm; idx = start_idx + tid; while (idx < stop_idx) { float val = float(src[idx]) * inv_norm; if (alpha != nullptr) { val *= float(alpha[idx - start_idx]); } dst[idx] = T(val); idx += block_dim; } } template<typename T> METAL_FUNC void layernorm( constant size_t & src_numel, constant size_t & el_to_sum_per_block, device const T * src, device T * dst, device const T * alpha, device const T * beta, constant float & eps, uint id, uint tid, uint dst_id, uint block_dim, threadgroup float * shared_memory ) { size_t start_idx = dst_id * el_to_sum_per_block; size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel); size_t idx = start_idx + tid; float tmp1 = 0; float tmp2 = 0; while (idx < stop_idx) { tmp1 += float(src[idx]); tmp2 += float(src[idx]) * float(src[idx]); idx += block_dim; } shared_memory[tid] = tmp1; shared_memory[tid + block_dim] = tmp2; threadgroup_barrier(mem_flags::mem_threadgroup); for (uint s = block_dim / 2; s > 0; s >>= 1) { if (tid < s) { shared_memory[tid] = shared_memory[tid] + shared_memory[tid + s]; shared_memory[block_dim + tid] = shared_memory[block_dim + tid] + shared_memory[block_dim + tid + s]; } threadgroup_barrier(mem_flags::mem_threadgroup); } /* wait for shared_memory[0] to be filled */ threadgroup_barrier(mem_flags::mem_threadgroup); float mean = shared_memory[0] / float(el_to_sum_per_block); float var = shared_memory[block_dim] / float(el_to_sum_per_block) - mean * mean; float inv_norm = 1.0f / sqrt(var + eps); idx = start_idx + tid; while (idx < stop_idx) { float val = (float(src[idx]) - mean) * inv_norm; if (alpha != nullptr) { val *= float(alpha[idx - start_idx]); } if (beta != nullptr) { val += float(beta[idx - start_idx]); } dst[idx] = T(val); idx += block_dim; } } #define RMSNORM(NAME, T) \ kernel void NAME( \ constant size_t &src_numel, \ constant size_t &el_to_sum_per_block, \ device const T *src, \ device T *dst, \ device const T *alpha, \ constant float &eps, \ uint id [[ thread_position_in_grid ]], \ uint tid [[ thread_index_in_threadgroup ]], \ uint dst_id [[ threadgroup_position_in_grid ]], \ uint block_dim [[ threads_per_threadgroup ]] \ ) { \ threadgroup float shared_memory[THREADGROUP_SIZE]; \ shared_memory[tid] = 0; \ rmsnorm<T>(src_numel, el_to_sum_per_block, src, dst, alpha, eps, id, tid, dst_id, block_dim, shared_memory); \ } \ #define LAYERNORM(NAME, T) \ kernel void NAME( \ constant size_t &src_numel, \ constant size_t &el_to_sum_per_block, \ device const T *src, \ device T *dst, \ device const T *alpha, \ device const T *beta, \ constant float &eps, \ uint id [[ thread_position_in_grid ]], \ uint tid [[ thread_index_in_threadgroup ]], \ uint dst_id [[ threadgroup_position_in_grid ]], \ uint block_dim [[ threads_per_threadgroup ]] \ ) { \ threadgroup float shared_memory[THREADGROUP_SIZE]; \ shared_memory[tid] = 0; \ layernorm<T>(src_numel, el_to_sum_per_block, src, dst, alpha, beta, eps, id, 
tid, dst_id, block_dim, shared_memory); \ } \ template<typename T> METAL_FUNC void ropei( constant size_t &bh, constant size_t &td, device const T *src, device const T *cos, device const T *sin, device T *dst, uint tid ) { if (2 * tid >= bh * td) { return; } size_t rope_idx = tid % (td / 2); T c = cos[rope_idx]; T s = sin[rope_idx]; dst[2 * tid] = src[2 * tid] * c - src[2 * tid + 1] * s; dst[2 * tid + 1] = src[2 * tid] * s + src[2 * tid + 1] * c; } template<typename T> METAL_FUNC void rope( constant size_t &bh, constant size_t &td, constant size_t &d, device const T *src, device const T *cos, device const T *sin, device T *dst, uint idx ) { if (2 * idx >= bh * td) { return; } size_t i_bh = idx / (td / 2); size_t i_td = idx - (td / 2) * i_bh; size_t i_t = i_td / (d / 2); size_t i_d = i_td - (d / 2) * i_t; size_t i1 = i_bh * td + i_t * d + i_d; size_t i2 = i1 + d / 2; size_t i_cs = i_t * (d / 2) + i_d; T c = cos[i_cs]; T s = sin[i_cs]; dst[i1] = src[i1] * c - src[i2] * s; dst[i2] = src[i1] * s + src[i2] * c; } template<typename T> METAL_FUNC void rope_thd( constant size_t &b, constant size_t &t, constant size_t &h, constant size_t &d, device const T *src, device const T *cos, device const T *sin, device T *dst, uint idx ) { if (2 * idx >= b * t * h * d) { return; } const size_t i_bth = idx / (d / 2); const size_t i_d = idx - (d / 2) * i_bth; const size_t i_t = (i_bth / h) % t; const size_t i1 = i_bth * d + i_d; const size_t i2 = i1 + d / 2; const size_t i_cs = i_t * (d / 2) + i_d; T c = cos[i_cs]; T s = sin[i_cs]; dst[i1] = src[i1] * c - src[i2] * s; dst[i2] = src[i1] * s + src[i2] * c; } #define ROPE(FN_NAME, FN_NAME_I, FN_NAME_THD, TYPENAME) \ kernel void FN_NAME_I( \ constant size_t &bh, \ constant size_t &td, \ device const TYPENAME *src, \ device const TYPENAME *cos, \ device const TYPENAME *sin, \ device TYPENAME *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ ropei<TYPENAME>(bh, td, src, cos, sin, dst, tid); \ }\ kernel void FN_NAME( \ constant size_t &bh, \ constant size_t &td, \ constant size_t &d, \ device const TYPENAME *src, \ device const TYPENAME *cos, \ device const TYPENAME *sin, \ device TYPENAME *dst, \ uint idx [[ thread_position_in_grid ]] \ ) { \ rope<TYPENAME>(bh, td, d, src, cos, sin, dst, idx); \ }\ kernel void FN_NAME_THD( \ constant size_t &b, \ constant size_t &t, \ constant size_t &h, \ constant size_t &d, \ device const TYPENAME *src, \ device const TYPENAME *cos, \ device const TYPENAME *sin, \ device TYPENAME *dst, \ uint idx [[ thread_position_in_grid ]] \ ) { \ rope_thd<TYPENAME>(b, t, h, d, src, cos, sin, dst, idx); \ }\ REDUCE(x + y, fast_sum_f32_strided, float, 0) REDUCE(x + y, fast_sum_u32_strided, uint, 0) REDUCE(x + y, fast_sum_f16_strided, half, 0) REDUCE(x + y, fast_sum_u8_strided, uint8_t, 0) REDUCE(x * y, fast_mul_f32_strided, float, 1) REDUCE(x * y, fast_mul_u32_strided, uint, 1) REDUCE(x * y, fast_mul_f16_strided, half, 1) REDUCE(MAX(x, y), fast_max_f32_strided, float, -HUGE_VALF) REDUCE(MAX(x, y), fast_max_u32_strided, uint, 0) REDUCE(MAX(x, y), fast_max_f16_strided, half, -HUGE_VALH) REDUCE(MAX(x, y), fast_max_u8_strided, uint8_t, 0) REDUCE(MIN(x, y), fast_min_f32_strided, float, HUGE_VALF) REDUCE(MIN(x, y), fast_min_u32_strided, uint, 0xFFFFFFFF) REDUCE(MIN(x, y), fast_min_f16_strided, half, HUGE_VALH) REDUCE(MIN(x, y), fast_min_u8_strided, uint8_t, 0xFF) ARGMIN(fast_argmin_f32_strided, float, HUGE_VALF) ARGMIN(fast_argmin_f16_strided, half, HUGE_VALH) ARGMIN(fast_argmin_u32_strided, uint, 0xFFFFFFFF) ARGMIN(fast_argmin_u8_strided, 
uint8_t, 0xFF) ARGMAX(fast_argmax_f32_strided, float, -HUGE_VALF) ARGMAX(fast_argmax_f16_strided, half, -HUGE_VALH) ARGMAX(fast_argmax_u32_strided, uint, 0) ARGMAX(fast_argmax_u8_strided, uint8_t, 0) SOFTMAX(softmax_f32, float) SOFTMAX(softmax_f16, half) RMSNORM(rmsnorm_f32, float) RMSNORM(rmsnorm_f16, half) LAYERNORM(layernorm_f32, float) LAYERNORM(layernorm_f16, half) ROPE(rope_f32, rope_i_f32, rope_thd_f32, float) ROPE(rope_f16, rope_i_f16, rope_thd_f16, half) #if __METAL_VERSION__ >= 220 REDUCE(x + y, fast_sum_i64_strided, int64_t, 0) REDUCE(MIN(x, y), fast_min_i64_strided, int64_t, INT_MAX) REDUCE(MAX(x, y), fast_max_i64_strided, int64_t, INT_MIN) ARGMIN(fast_argmin_i64_strided, int64_t, INT_MAX) ARGMAX(fast_argmax_i64_strided, int64_t, INT_MIN) #endif #if defined(__HAVE_BFLOAT__) REDUCE(x + y, fast_sum_bf16, bfloat, 0) REDUCE(x + y, fast_sum_bf16_strided, half, 0) REDUCE(x * y, fast_mul_bf16, bfloat, 1) REDUCE(x * y, fast_mul_bf16_strided, bfloat, 1) REDUCE(MAX(x, y), fast_max_bf16, bfloat, -HUGE_VALBF) REDUCE(MAX(x, y), fast_max_bf16_strided, bfloat, -HUGE_VALBF) REDUCE(MIN(x, y), fast_min_bf16, bfloat, HUGE_VALBF) REDUCE(MIN(x, y), fast_min_bf16_strided, bfloat, HUGE_VALBF) ARGMIN(fast_argmin_bf16, bfloat, HUGE_VALBF) ARGMAX(fast_argmax_bf16, bfloat, -HUGE_VALBF) SOFTMAX(softmax_bf16, bfloat) RMSNORM(rmsnorm_bf16, bfloat) LAYERNORM(layernorm_bf16, bfloat) ROPE(rope_bf16, rope_i_bf16, rope_thd_bf16, bfloat) #endif
candle/candle-metal-kernels/src/reduce.metal/0
{ "file_path": "candle/candle-metal-kernels/src/reduce.metal", "repo_id": "candle", "token_count": 9140 }
34
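The strided reduce/argmin/argmax kernels above all funnel their addressing through get_strided_index. A host-side Rust replica of that helper follows; it can be handy for sanity-checking which offsets a given (dims, strides) pair visits. The example dims and strides are arbitrary.

```rust
// Host-side replica of the Metal get_strided_index helper above: converts a linear
// element index into a storage offset for a tensor described by (dims, strides).
fn get_strided_index(mut idx: usize, dims: &[usize], strides: &[usize]) -> usize {
    let mut strided = 0;
    // Walk dimensions from innermost (last) to outermost, peeling off coordinates.
    for d in (0..dims.len()).rev() {
        strided += (idx % dims[d]) * strides[d];
        idx /= dims[d];
    }
    strided
}

fn main() {
    // A 2x3 view over transposed storage: logical dims [2, 3], strides [1, 2].
    let dims = [2, 3];
    let strides = [1, 2];
    let offsets: Vec<usize> = (0..6)
        .map(|i| get_strided_index(i, &dims, &strides))
        .collect();
    println!("{offsets:?}"); // [0, 2, 4, 1, 3, 5]
}
```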
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::{DType, Device, Result, Tensor}; use candle_nn::{linear, AdamW, Linear, Module, Optimizer, ParamsAdamW, VarBuilder, VarMap}; fn gen_data() -> Result<(Tensor, Tensor)> { // Generate some sample linear data. let w_gen = Tensor::new(&[[3f32, 1.]], &Device::Cpu)?; let b_gen = Tensor::new(-2f32, &Device::Cpu)?; let gen = Linear::new(w_gen, Some(b_gen)); let sample_xs = Tensor::new(&[[2f32, 1.], [7., 4.], [-4., 12.], [5., 8.]], &Device::Cpu)?; let sample_ys = gen.forward(&sample_xs)?; Ok((sample_xs, sample_ys)) } fn main() -> Result<()> { let (sample_xs, sample_ys) = gen_data()?; // Use backprop to run a linear regression between samples and get the coefficients back. let varmap = VarMap::new(); let vb = VarBuilder::from_varmap(&varmap, DType::F32, &Device::Cpu); let model = linear(2, 1, vb.pp("linear"))?; let params = ParamsAdamW { lr: 0.1, ..Default::default() }; let mut opt = AdamW::new(varmap.all_vars(), params)?; for step in 0..10000 { let ys = model.forward(&sample_xs)?; let loss = ys.sub(&sample_ys)?.sqr()?.sum_all()?; opt.backward_step(&loss)?; println!("{step} {}", loss.to_vec0::<f32>()?); } Ok(()) }
candle/candle-nn/examples/basic_optimizer.rs/0
{ "file_path": "candle/candle-nn/examples/basic_optimizer.rs", "repo_id": "candle", "token_count": 595 }
35
//! Various optimization algorithms. use candle::{Result, Tensor, Var}; /// The interface optimizers should implement. pub trait Optimizer: Sized { type Config: Sized; fn new(vars: Vec<Var>, config: Self::Config) -> Result<Self>; fn step(&mut self, grads: &candle::backprop::GradStore) -> Result<()>; fn learning_rate(&self) -> f64; fn set_learning_rate(&mut self, lr: f64); fn empty(config: Self::Config) -> Result<Self> { Self::new(vec![], config) } fn backward_step(&mut self, loss: &Tensor) -> Result<()> { let grads = loss.backward()?; self.step(&grads) } fn from_slice(vars: &[&Var], config: Self::Config) -> Result<Self> { let vars: Vec<_> = vars.iter().map(|&v| v.clone()).collect(); Self::new(vars, config) } } /// Optimizer for Stochastic Gradient Descent. /// /// Contrary to the PyTorch implementation of SGD, this version does not support momentum. #[derive(Debug)] pub struct SGD { vars: Vec<Var>, learning_rate: f64, } impl Optimizer for SGD { type Config = f64; fn new(vars: Vec<Var>, learning_rate: f64) -> Result<Self> { let vars = vars .into_iter() .filter(|var| var.dtype().is_float()) .collect(); Ok(Self { vars, learning_rate, }) } fn learning_rate(&self) -> f64 { self.learning_rate } fn step(&mut self, grads: &candle::backprop::GradStore) -> Result<()> { for var in self.vars.iter() { if let Some(grad) = grads.get(var) { var.set(&var.sub(&(grad * self.learning_rate)?)?)?; } } Ok(()) } fn set_learning_rate(&mut self, lr: f64) { self.learning_rate = lr } } impl SGD { pub fn into_inner(self) -> Vec<Var> { self.vars } pub fn push(&mut self, var: &Var) { self.vars.push(var.clone()) } } #[derive(Clone, Debug)] pub struct ParamsAdamW { pub lr: f64, pub beta1: f64, pub beta2: f64, pub eps: f64, pub weight_decay: f64, } impl Default for ParamsAdamW { fn default() -> Self { Self { lr: 0.001, beta1: 0.9, beta2: 0.999, eps: 1e-8, weight_decay: 0.01, } } } #[derive(Debug)] struct VarAdamW { var: Var, first_moment: Var, second_moment: Var, } #[derive(Debug)] pub struct AdamW { vars: Vec<VarAdamW>, step_t: usize, params: ParamsAdamW, } impl Optimizer for AdamW { type Config = ParamsAdamW; fn new(vars: Vec<Var>, params: ParamsAdamW) -> Result<Self> { let vars = vars .into_iter() .filter(|var| var.dtype().is_float()) .map(|var| { let dtype = var.dtype(); let shape = var.shape(); let device = var.device(); let first_moment = Var::zeros(shape, dtype, device)?; let second_moment = Var::zeros(shape, dtype, device)?; Ok(VarAdamW { var, first_moment, second_moment, }) }) .collect::<Result<Vec<_>>>()?; Ok(Self { vars, params, step_t: 0, }) } fn learning_rate(&self) -> f64 { self.params.lr } fn set_learning_rate(&mut self, lr: f64) { self.params.lr = lr } fn step(&mut self, grads: &candle::backprop::GradStore) -> Result<()> { self.step_t += 1; let lr = self.params.lr; let lambda = self.params.weight_decay; let lr_lambda = lr * lambda; let beta1 = self.params.beta1; let beta2 = self.params.beta2; let scale_m = 1f64 / (1f64 - beta1.powi(self.step_t as i32)); let scale_v = 1f64 / (1f64 - beta2.powi(self.step_t as i32)); for var in self.vars.iter() { let theta = &var.var; let m = &var.first_moment; let v = &var.second_moment; if let Some(g) = grads.get(theta) { // This involves locking 3 RWLocks per params, if the parameters are large this // should not be an issue but this may be problematic with models with lots of // small parameters. let next_m = ((m.as_tensor() * beta1)? + (g * (1.0 - beta1))?)?; let next_v = ((v.as_tensor() * beta2)? + (g.sqr()? 
* (1.0 - beta2))?)?; let m_hat = (&next_m * scale_m)?; let v_hat = (&next_v * scale_v)?; let next_theta = (theta.as_tensor() * (1f64 - lr_lambda))?; let adjusted_grad = (m_hat / (v_hat.sqrt()? + self.params.eps)?)?; let next_theta = (next_theta - (adjusted_grad * lr)?)?; m.set(&next_m)?; v.set(&next_v)?; theta.set(&next_theta)?; } } Ok(()) } } impl AdamW { pub fn new_lr(vars: Vec<Var>, learning_rate: f64) -> Result<Self> { let params = ParamsAdamW { lr: learning_rate, ..ParamsAdamW::default() }; Self::new(vars, params) } pub fn params(&self) -> &ParamsAdamW { &self.params } pub fn set_params(&mut self, params: ParamsAdamW) { self.params = params; } }
candle/candle-nn/src/optim.rs/0
{ "file_path": "candle/candle-nn/src/optim.rs", "repo_id": "candle", "token_count": 2798 }
36
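A minimal usage sketch for the SGD optimizer defined above, mirroring the AdamW example from the previous file. The synthetic data, learning rate, and step count are made up, and the imports assume the usual candle / candle-nn crate layout.

```rust
// Minimal SGD sketch: fit y = 3*x0 + x1 - 2 on a few hand-written points.
use candle::{Device, Result, Tensor, Var};
use candle_nn::{Optimizer, SGD};

fn main() -> Result<()> {
    let dev = Device::Cpu;
    let w = Var::new(&[[0f32, 0.]], &dev)?;
    let b = Var::new(0f32, &dev)?;
    let xs = Tensor::new(&[[2f32, 1.], [7., 4.], [-4., 12.]], &dev)?;
    let ys = Tensor::new(&[[5f32], [23.], [-2.]], &dev)?;

    let mut sgd = SGD::new(vec![w.clone(), b.clone()], 0.004)?;
    for _step in 0..100 {
        let preds = xs.matmul(&w.as_tensor().t()?)?.broadcast_add(b.as_tensor())?;
        let loss = preds.sub(&ys)?.sqr()?.sum_all()?;
        // backward_step computes the gradients and applies one SGD update in place.
        sgd.backward_step(&loss)?;
    }
    println!("w = {}, b = {}", w.as_tensor(), b.as_tensor());
    Ok(())
}
```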
use std::io::Result;

fn main() -> Result<()> {
    prost_build::compile_protos(&["src/onnx.proto3"], &["src/"])?;
    Ok(())
}
candle/candle-onnx/build.rs/0
{ "file_path": "candle/candle-onnx/build.rs", "repo_id": "candle", "token_count": 60 }
37
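For context, prost-build writes the generated protobuf bindings into OUT_DIR at compile time, and a crate normally pulls them in with an include! like the sketch below. The module name (onnx) and generated file name follow the standard prost pattern and are assumptions, not something taken from candle-onnx itself.

```rust
// Sketch of how the code generated by the build script above is typically included.
// The `onnx` module/file name follows the protobuf package; treat it as an assumption.
pub mod onnx {
    include!(concat!(env!("OUT_DIR"), "/onnx.rs"));
}
```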
from dataclasses import dataclass from typing import Optional from candle.nn import Module, Embedding, LayerNorm, Linear, ModuleList from candle import Tensor import candle import candle.functional as F from typing import Tuple, Optional @dataclass class Config: vocab_size: int = 30522 hidden_size: int = 768 num_hidden_layers: int = 12 num_attention_heads: int = 12 intermediate_size: int = 3072 hidden_act: str = "gelu" hidden_dropout_prob: float = 0.1 max_position_embeddings: int = 512 type_vocab_size: int = 2 initializer_range: float = 0.02 layer_norm_eps: float = 1e-12 pad_token_id: int = 0 position_embedding_type: str = "absolute" use_cache: bool = True classifier_dropout: Optional[float] = None model_type: Optional[str] = "bert" class BertSelfAttention(Module): def __init__(self, config: Config) -> None: super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / self.num_attention_heads) all_head_size = int(config.num_attention_heads * self.attention_head_size) hidden_size = config.hidden_size self.query = Linear(hidden_size, all_head_size) self.key = Linear(hidden_size, all_head_size) self.value = Linear(hidden_size, all_head_size) def transpose_for_scores(self, x: Tensor) -> Tensor: new_x_shape = x.shape[:-1] + ( self.num_attention_heads, self.attention_head_size, ) x = x.reshape(new_x_shape).transpose(1, 2) return x.contiguous() def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor: query = self.query.forward(hidden_states) key = self.key.forward(hidden_states) value = self.value.forward(hidden_states) query = self.transpose_for_scores(query) key = self.transpose_for_scores(key) value = self.transpose_for_scores(value) attention_scores = query.matmul(key.t()) attention_scores = attention_scores / float(self.attention_head_size) ** 0.5 if attention_mask is not None: b_size, _, _, last_dim = attention_scores.shape attention_scores = attention_scores.broadcast_add(attention_mask.reshape((b_size, 1, 1, last_dim))) attention_probs = F.softmax(attention_scores, dim=-1) context_layer = attention_probs.matmul(value) context_layer = context_layer.transpose(1, 2).contiguous() context_layer = context_layer.flatten_from(-2) return context_layer class BertSelfOutput(Module): def __init__(self, config: Config) -> None: super().__init__() self.dense = Linear(config.hidden_size, config.hidden_size) self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: Tensor, input_tensor: Tensor) -> Tensor: hidden_states = self.dense.forward(hidden_states) return self.LayerNorm.forward(hidden_states + input_tensor) class BertAttention(Module): def __init__(self, config: Config) -> None: super().__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, hidden_states: Tensor, attention_mask: None) -> Tensor: self_outputs = self.self.forward(hidden_states, attention_mask=attention_mask) attention_output = self.output.forward(self_outputs, hidden_states) return attention_output class BertIntermediate(Module): def __init__(self, config: Config) -> None: super().__init__() self.dense = Linear(config.hidden_size, config.intermediate_size) self.act = F.gelu if config.hidden_act == "gelu" else F.relu def forward(self, hidden_states: Tensor) -> Tensor: hidden_states = self.dense.forward(hidden_states) return self.act(hidden_states) class BertOutput(Module): def __init__(self, config: Config) -> None: super().__init__() self.dense = 
Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: Tensor, input_tensor: Tensor) -> Tensor: hidden_states = self.dense.forward(hidden_states) return self.LayerNorm.forward(hidden_states + input_tensor) class BertLayer(Module): def __init__(self, config: Config) -> None: super().__init__() self.attention = BertAttention(config) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor: attention_output = self.attention.forward(hidden_states, attention_mask=attention_mask) # TODO: Support cross-attention? # https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L523 # TODO: Support something similar to `apply_chunking_to_forward`? intermediate_output = self.intermediate.forward(attention_output) layer_output = self.output.forward(intermediate_output, attention_output) return layer_output class BertEncoder(Module): def __init__(self, config: Config) -> None: super().__init__() self.layer = ModuleList() for _ in range(config.num_hidden_layers): self.layer.append(BertLayer(config)) def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor: for l in self.layer: hidden_states = l.forward(hidden_states, attention_mask=attention_mask) return hidden_states class BertEmbeddings(Module): def __init__(self, config: Config) -> None: super().__init__() self.word_embeddings = Embedding(config.vocab_size, config.hidden_size) self.position_embeddings = Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.position_ids = candle.Tensor(list(range(config.max_position_embeddings))).reshape( (1, config.max_position_embeddings) ) def forward(self, input_ids: Tensor, token_type_ids: Tensor) -> Tensor: (_batch_size, seq_len) = input_ids.shape input_embeddings = self.word_embeddings.forward(input_ids) token_type_embeddings = self.token_type_embeddings.forward(token_type_ids) embeddings: Tensor = input_embeddings + token_type_embeddings position_ids = list(range(seq_len)) position_ids = Tensor(position_ids).to_dtype(input_ids.dtype).to_device(input_ids.device) embeddings = embeddings.broadcast_add(self.position_embeddings.forward(position_ids)) embeddings = self.LayerNorm(embeddings) return embeddings class BertPooler(Module): def __init__(self, config: Config) -> None: super().__init__() self.dense = Linear(config.hidden_size, config.hidden_size) self.activation = F.tanh def forward(self, hidden_states: Tensor) -> Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense.forward(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output def masked_fill(on_false: float, mask: Tensor, on_true: float): shape = mask.shape on_true = candle.tensor(on_true).broadcast_as(shape) on_false = candle.tensor(on_false).broadcast_as(shape) return mask.where_cond(on_true, on_false) # https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L874 class BertModel(Module): def __init__(self, config: Config, add_pooling_layer=True) -> None: super().__init__() self.config = config self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) 
self.pooler = BertPooler(config) if add_pooling_layer else None def forward( self, input_ids: Tensor, token_type_ids: Tensor, attention_mask=None ) -> Tuple[Tensor, Optional[Tensor]]: if attention_mask is not None: # Replace 0s with -inf, and 1s with 0s. attention_mask = masked_fill(float("-inf"), attention_mask, 1.0) embeddings = self.embeddings.forward(input_ids, token_type_ids) encoder_out = self.encoder.forward(embeddings, attention_mask=attention_mask) pooled_output = self.pooler(encoder_out) if self.pooler is not None else None return encoder_out, pooled_output
candle/candle-pyo3/py_src/candle/models/bert.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/models/bert.py", "repo_id": "candle", "token_count": 3528 }
38
# This example shows how the candle Python api can be used to replicate llama.cpp. import sys from typing import Dict, Tuple, Any import candle from candle.models.llama import QuantizedLlama from candle import utils MAX_SEQ_LEN = 4096 def gguf_rename(tensor_name: str): if tensor_name == "token_embd.weight": return "tok_embeddings.weight" if tensor_name == "output_norm.weight": return "norm.weight" tensor_name = tensor_name.replace("blk.", "layers.") tensor_name = tensor_name.replace(".attn_q.", ".attention.wq.") tensor_name = tensor_name.replace(".attn_k.", ".attention.wk.") tensor_name = tensor_name.replace(".attn_v.", ".attention.wv.") tensor_name = tensor_name.replace(".attn_output.", ".attention.wo.") tensor_name = tensor_name.replace(".ffn_gate.", ".feed_forward.w1.") tensor_name = tensor_name.replace(".ffn_down.", ".feed_forward.w2.") tensor_name = tensor_name.replace(".ffn_up.", ".feed_forward.w3.") tensor_name = tensor_name.replace(".attn_norm.", ".attention_norm.") return tensor_name def main(): if len(sys.argv) < 2: raise ValueError("missing weight file argument") filename = sys.argv[1] print(f"reading model file {filename}") if filename.endswith("gguf"): all_tensors, metadata = utils.load_gguf(filename) vocab = metadata["tokenizer.ggml.tokens"] for i, v in enumerate(vocab): vocab[i] = "\n" if v == "<0x0A>" else v.replace("▁", " ") hparams = {k: v for (k, v) in metadata.items() if not k.startswith("tokenizer")} print(hparams) hparams = { "n_vocab": len(vocab), "n_embd": metadata["llama.embedding_length"], "n_mult": 256, "n_head": metadata["llama.attention.head_count"], "n_head_kv": metadata["llama.attention.head_count_kv"], "n_layer": metadata["llama.block_count"], "n_rot": metadata["llama.rope.dimension_count"], "rope_freq": metadata.get("llama.rope.freq_base", 10000.0), "ftype": metadata["general.file_type"], "context_length": metadata["llama.context_length"], } all_tensors = {gguf_rename(k): v for k, v in all_tensors.items()} else: all_tensors, hparams, vocab = utils.load_ggml(filename) hparams["context_length"] = 2048 print(hparams) model = QuantizedLlama(hparams, all_tensors) print("model built, starting inference") tokens = [1] for token_idx in range(500): last_token = tokens[-1] lt = candle.tensor([last_token]).unsqueeze(0) logits = model.forward(lt, len(tokens)) # Greedy sampling for now # pr = candle.nn.softmax(logits, -1) m = logits.get(0).argmax_keepdim(-1) next_token = m.values()[0] print(vocab[next_token], end="", flush=True) tokens.append(next_token) if __name__ == "__main__": main()
candle/candle-pyo3/quant-llama.py/0
{ "file_path": "candle/candle-pyo3/quant-llama.py", "repo_id": "candle", "token_count": 1318 }
39
# candle-transformers
candle/candle-transformers/README.md/0
{ "file_path": "candle/candle-transformers/README.md", "repo_id": "candle", "token_count": 6 }
40
/// Adapted from https://github.com/descriptinc/descript-audio-codec use crate::models::encodec; use candle::{IndexOp, Result, Tensor, D}; use candle_nn::{Conv1d, Conv1dConfig, ConvTranspose1d, ConvTranspose1dConfig, VarBuilder}; #[derive(serde::Deserialize, Debug, Clone)] pub struct Config { pub num_codebooks: usize, pub model_bitrate: u32, pub codebook_size: usize, pub latent_dim: usize, pub frame_rate: u32, pub sampling_rate: u32, } #[derive(Debug, Clone)] pub struct Snake1d { alpha: Tensor, } impl Snake1d { pub fn new(channels: usize, vb: VarBuilder) -> Result<Self> { let alpha = vb.get((1, channels, 1), "alpha")?; Ok(Self { alpha }) } } impl candle::Module for Snake1d { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs_shape = xs.shape(); let xs = xs.flatten_from(2)?; let sin = self.alpha.broadcast_mul(&xs)?.sin()?; let sin = (&sin * &sin)?; (xs + (&self.alpha + 1e-9)?.recip()?.broadcast_mul(&sin)?)?.reshape(xs_shape) } } #[derive(Debug, Clone)] pub struct ResidualUnit { snake1: Snake1d, conv1: Conv1d, snake2: Snake1d, conv2: Conv1d, } impl ResidualUnit { pub fn new(dim: usize, dilation: usize, vb: VarBuilder) -> Result<Self> { let pad = ((7 - 1) * dilation) / 2; let vb = vb.pp("block"); let snake1 = Snake1d::new(dim, vb.pp(0))?; let cfg1 = Conv1dConfig { dilation, padding: pad, ..Default::default() }; let conv1 = encodec::conv1d_weight_norm(dim, dim, 7, cfg1, vb.pp(1))?; let snake2 = Snake1d::new(dim, vb.pp(2))?; let conv2 = encodec::conv1d_weight_norm(dim, dim, 1, Default::default(), vb.pp(3))?; Ok(Self { snake1, conv1, snake2, conv2, }) } } impl candle::Module for ResidualUnit { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let ys = xs .apply(&self.snake1)? .apply(&self.conv1)? .apply(&self.snake2)? .apply(&self.conv2)?; let pad = (xs.dim(D::Minus1)? - ys.dim(D::Minus1)?) / 2; if pad > 0 { &ys + xs.narrow(D::Minus1, pad, ys.dim(D::Minus1)?) } else { ys + xs } } } #[derive(Debug, Clone)] pub struct EncoderBlock { res1: ResidualUnit, res2: ResidualUnit, res3: ResidualUnit, snake1: Snake1d, conv1: Conv1d, } impl EncoderBlock { pub fn new(dim: usize, stride: usize, vb: VarBuilder) -> Result<Self> { let vb = vb.pp("block"); let res1 = ResidualUnit::new(dim / 2, 1, vb.pp(0))?; let res2 = ResidualUnit::new(dim / 2, 3, vb.pp(1))?; let res3 = ResidualUnit::new(dim / 2, 9, vb.pp(2))?; let snake1 = Snake1d::new(dim / 2, vb.pp(3))?; let cfg1 = Conv1dConfig { stride, padding: (stride + 1) / 2, ..Default::default() }; let conv1 = encodec::conv1d_weight_norm(dim / 2, dim, 2 * stride, cfg1, vb.pp(4))?; Ok(Self { res1, res2, res3, snake1, conv1, }) } } impl candle::Module for EncoderBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.res1)? .apply(&self.res2)? .apply(&self.res3)? .apply(&self.snake1)? .apply(&self.conv1) } } #[derive(Debug, Clone)] pub struct Encoder { conv1: Conv1d, blocks: Vec<EncoderBlock>, snake1: Snake1d, conv2: Conv1d, } impl candle::Module for Encoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = xs.apply(&self.conv1)?; for block in self.blocks.iter() { xs = xs.apply(block)? 
} xs.apply(&self.snake1)?.apply(&self.conv2) } } impl Encoder { pub fn new( mut d_model: usize, strides: &[usize], d_latent: usize, vb: VarBuilder, ) -> Result<Self> { let vb = vb.pp("block"); let cfg1 = Conv1dConfig { padding: 3, ..Default::default() }; let conv1 = encodec::conv1d_weight_norm(1, d_model, 7, cfg1, vb.pp(0))?; let mut blocks = Vec::with_capacity(strides.len()); for (block_idx, stride) in strides.iter().enumerate() { d_model *= 2; let block = EncoderBlock::new(d_model, *stride, vb.pp(block_idx + 1))?; blocks.push(block) } let snake1 = Snake1d::new(d_model, vb.pp(strides.len() + 1))?; let cfg2 = Conv1dConfig { padding: 1, ..Default::default() }; let conv2 = encodec::conv1d_weight_norm(d_model, d_latent, 3, cfg2, vb.pp(strides.len() + 2))?; Ok(Self { conv1, blocks, snake1, conv2, }) } } #[derive(Debug, Clone)] pub struct DecoderBlock { snake1: Snake1d, conv_tr1: ConvTranspose1d, res1: ResidualUnit, res2: ResidualUnit, res3: ResidualUnit, } impl DecoderBlock { pub fn new(in_dim: usize, out_dim: usize, stride: usize, vb: VarBuilder) -> Result<Self> { let vb = vb.pp("block"); let snake1 = Snake1d::new(in_dim, vb.pp(0))?; let cfg = ConvTranspose1dConfig { stride, padding: (stride + 1) / 2, ..Default::default() }; let conv_tr1 = encodec::conv_transpose1d_weight_norm( in_dim, out_dim, 2 * stride, true, cfg, vb.pp(1), )?; let res1 = ResidualUnit::new(out_dim, 1, vb.pp(2))?; let res2 = ResidualUnit::new(out_dim, 3, vb.pp(3))?; let res3 = ResidualUnit::new(out_dim, 9, vb.pp(4))?; Ok(Self { snake1, conv_tr1, res1, res2, res3, }) } } impl candle_nn::Module for DecoderBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.snake1)? .apply(&self.conv_tr1)? .apply(&self.res1)? .apply(&self.res2)? .apply(&self.res3) } } #[derive(Debug, Clone)] pub struct Decoder { conv1: Conv1d, blocks: Vec<DecoderBlock>, snake1: Snake1d, conv2: Conv1d, } impl Decoder { pub fn new( in_c: usize, mut channels: usize, rates: &[usize], d_out: usize, vb: VarBuilder, ) -> Result<Self> { let vb = vb.pp("model"); let cfg1 = Conv1dConfig { padding: 3, ..Default::default() }; let conv1 = encodec::conv1d_weight_norm(in_c, channels, 7, cfg1, vb.pp(0))?; let mut blocks = Vec::with_capacity(rates.len()); for (idx, stride) in rates.iter().enumerate() { let block = DecoderBlock::new(channels, channels / 2, *stride, vb.pp(idx + 1))?; channels /= 2; blocks.push(block) } let snake1 = Snake1d::new(channels, vb.pp(rates.len() + 1))?; let conv2 = encodec::conv1d_weight_norm(channels, d_out, 7, cfg1, vb.pp(rates.len() + 2))?; Ok(Self { conv1, blocks, snake1, conv2, }) } } impl candle::Module for Decoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = xs.apply(&self.conv1)?; for block in self.blocks.iter() { xs = xs.apply(block)? 
} xs.apply(&self.snake1)?.apply(&self.conv2) } } #[allow(unused)] #[derive(Clone, Debug)] pub struct VectorQuantizer { in_proj: Conv1d, out_proj: Conv1d, codebook: candle_nn::Embedding, } impl VectorQuantizer { pub fn new(in_dim: usize, cb_size: usize, cb_dim: usize, vb: VarBuilder) -> Result<Self> { let in_proj = encodec::conv1d_weight_norm(in_dim, cb_dim, 1, Default::default(), vb.pp("in_proj"))?; let out_proj = encodec::conv1d_weight_norm(cb_dim, in_dim, 1, Default::default(), vb.pp("out_proj"))?; let codebook = candle_nn::embedding(cb_size, cb_dim, vb.pp("codebook"))?; Ok(Self { in_proj, out_proj, codebook, }) } pub fn embed_code(&self, embed_id: &Tensor) -> Result<Tensor> { embed_id.apply(&self.codebook) } pub fn decode_code(&self, embed_id: &Tensor) -> Result<Tensor> { self.embed_code(embed_id)?.transpose(1, 2) } } #[derive(Clone, Debug)] pub struct ResidualVectorQuantizer { quantizers: Vec<VectorQuantizer>, } impl ResidualVectorQuantizer { pub fn new( input_dim: usize, n_codebooks: usize, cb_size: usize, cb_dim: usize, vb: VarBuilder, ) -> Result<Self> { let vb = &vb.pp("quantizers"); let quantizers = (0..n_codebooks) .map(|i| VectorQuantizer::new(input_dim, cb_size, cb_dim, vb.pp(i))) .collect::<Result<Vec<_>>>()?; Ok(Self { quantizers }) } pub fn from_codes(&self, codes: &Tensor) -> Result<Tensor> { let mut sum = None; for (idx, quantizer) in self.quantizers.iter().enumerate() { let z_p_i = quantizer.decode_code(&codes.i((.., idx))?)?; let z_q_i = z_p_i.apply(&quantizer.out_proj)?; let s = match sum { None => z_q_i, Some(s) => (s + z_q_i)?, }; sum = Some(s) } match sum { Some(s) => Ok(s), None => candle::bail!("empty codebooks"), } } } #[derive(Debug, Clone)] pub struct Model { pub encoder: Encoder, pub quantizer: ResidualVectorQuantizer, pub decoder: Decoder, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb = vb.pp("model"); let encoder = Encoder::new(64, &[2, 4, 8, 8], cfg.latent_dim, vb.pp("encoder"))?; let quantizer = ResidualVectorQuantizer::new( cfg.latent_dim, cfg.num_codebooks, cfg.codebook_size, 8, vb.pp("quantizer"), )?; let decoder = Decoder::new(cfg.latent_dim, 1536, &[8, 8, 4, 2], 1, vb.pp("decoder"))?; Ok(Self { encoder, decoder, quantizer, }) } pub fn decode_codes(&self, audio_codes: &Tensor) -> Result<Tensor> { let audio_values = self.quantizer.from_codes(audio_codes)?; audio_values.apply(&self.decoder) } }
candle/candle-transformers/src/models/dac.rs/0
{ "file_path": "candle/candle-transformers/src/models/dac.rs", "repo_id": "candle", "token_count": 5651 }
41
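The Snake1d module above computes x + sin(alpha * x)^2 / (alpha + 1e-9) element-wise with a learned per-channel alpha. A scalar Rust sketch of that activation, with an arbitrary alpha, for reference:

```rust
// Scalar view of the Snake activation implemented by Snake1d above.
fn snake(x: f32, alpha: f32) -> f32 {
    let s = (alpha * x).sin();
    // x plus the squared sine term, scaled by 1 / (alpha + 1e-9) as in the tensor version.
    x + s * s / (alpha + 1e-9)
}

fn main() {
    for &x in &[-1.0f32, 0.0, 0.5, 2.0] {
        println!("snake({x}) = {}", snake(x, 1.0));
    }
}
```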
use candle::{Module, Result, Tensor, D}; use candle_nn as nn; use super::projections::{AttnProjections, Mlp, Qkv, QkvOnlyAttnProjections}; pub struct ModulateIntermediates { gate_msa: Tensor, shift_mlp: Tensor, scale_mlp: Tensor, gate_mlp: Tensor, } pub struct DiTBlock { norm1: LayerNormNoAffine, attn: AttnProjections, norm2: LayerNormNoAffine, mlp: Mlp, ada_ln_modulation: nn::Sequential, } pub struct LayerNormNoAffine { eps: f64, } impl LayerNormNoAffine { pub fn new(eps: f64) -> Self { Self { eps } } } impl Module for LayerNormNoAffine { fn forward(&self, x: &Tensor) -> Result<Tensor> { nn::LayerNorm::new_no_bias(Tensor::ones_like(x)?, self.eps).forward(x) } } impl DiTBlock { pub fn new(hidden_size: usize, num_heads: usize, vb: nn::VarBuilder) -> Result<Self> { // {'hidden_size': 1536, 'num_heads': 24} let norm1 = LayerNormNoAffine::new(1e-6); let attn = AttnProjections::new(hidden_size, num_heads, vb.pp("attn"))?; let norm2 = LayerNormNoAffine::new(1e-6); let mlp_ratio = 4; let mlp = Mlp::new(hidden_size, hidden_size * mlp_ratio, vb.pp("mlp"))?; let n_mods = 6; let ada_ln_modulation = nn::seq().add(nn::Activation::Silu).add(nn::linear( hidden_size, n_mods * hidden_size, vb.pp("adaLN_modulation.1"), )?); Ok(Self { norm1, attn, norm2, mlp, ada_ln_modulation, }) } pub fn pre_attention(&self, x: &Tensor, c: &Tensor) -> Result<(Qkv, ModulateIntermediates)> { let modulation = self.ada_ln_modulation.forward(c)?; let chunks = modulation.chunk(6, D::Minus1)?; let (shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp) = ( chunks[0].clone(), chunks[1].clone(), chunks[2].clone(), chunks[3].clone(), chunks[4].clone(), chunks[5].clone(), ); let norm_x = self.norm1.forward(x)?; let modulated_x = modulate(&norm_x, &shift_msa, &scale_msa)?; let qkv = self.attn.pre_attention(&modulated_x)?; Ok(( qkv, ModulateIntermediates { gate_msa, shift_mlp, scale_mlp, gate_mlp, }, )) } pub fn post_attention( &self, attn: &Tensor, x: &Tensor, mod_interm: &ModulateIntermediates, ) -> Result<Tensor> { let attn_out = self.attn.post_attention(attn)?; let x = x.add(&attn_out.broadcast_mul(&mod_interm.gate_msa.unsqueeze(1)?)?)?; let norm_x = self.norm2.forward(&x)?; let modulated_x = modulate(&norm_x, &mod_interm.shift_mlp, &mod_interm.scale_mlp)?; let mlp_out = self.mlp.forward(&modulated_x)?; let x = x.add(&mlp_out.broadcast_mul(&mod_interm.gate_mlp.unsqueeze(1)?)?)?; Ok(x) } } pub struct QkvOnlyDiTBlock { norm1: LayerNormNoAffine, attn: QkvOnlyAttnProjections, ada_ln_modulation: nn::Sequential, } impl QkvOnlyDiTBlock { pub fn new(hidden_size: usize, num_heads: usize, vb: nn::VarBuilder) -> Result<Self> { let norm1 = LayerNormNoAffine::new(1e-6); let attn = QkvOnlyAttnProjections::new(hidden_size, num_heads, vb.pp("attn"))?; let n_mods = 2; let ada_ln_modulation = nn::seq().add(nn::Activation::Silu).add(nn::linear( hidden_size, n_mods * hidden_size, vb.pp("adaLN_modulation.1"), )?); Ok(Self { norm1, attn, ada_ln_modulation, }) } pub fn pre_attention(&self, x: &Tensor, c: &Tensor) -> Result<Qkv> { let modulation = self.ada_ln_modulation.forward(c)?; let chunks = modulation.chunk(2, D::Minus1)?; let (shift_msa, scale_msa) = (chunks[0].clone(), chunks[1].clone()); let norm_x = self.norm1.forward(x)?; let modulated_x = modulate(&norm_x, &shift_msa, &scale_msa)?; self.attn.pre_attention(&modulated_x) } } pub struct FinalLayer { norm_final: LayerNormNoAffine, linear: nn::Linear, ada_ln_modulation: nn::Sequential, } impl FinalLayer { pub fn new( hidden_size: usize, patch_size: usize, out_channels: usize, vb: 
nn::VarBuilder, ) -> Result<Self> { let norm_final = LayerNormNoAffine::new(1e-6); let linear = nn::linear( hidden_size, patch_size * patch_size * out_channels, vb.pp("linear"), )?; let ada_ln_modulation = nn::seq().add(nn::Activation::Silu).add(nn::linear( hidden_size, 2 * hidden_size, vb.pp("adaLN_modulation.1"), )?); Ok(Self { norm_final, linear, ada_ln_modulation, }) } pub fn forward(&self, x: &Tensor, c: &Tensor) -> Result<Tensor> { let modulation = self.ada_ln_modulation.forward(c)?; let chunks = modulation.chunk(2, D::Minus1)?; let (shift, scale) = (chunks[0].clone(), chunks[1].clone()); let norm_x = self.norm_final.forward(x)?; let modulated_x = modulate(&norm_x, &shift, &scale)?; let output = self.linear.forward(&modulated_x)?; Ok(output) } } fn modulate(x: &Tensor, shift: &Tensor, scale: &Tensor) -> Result<Tensor> { let shift = shift.unsqueeze(1)?; let scale = scale.unsqueeze(1)?; let scale_plus_one = scale.add(&Tensor::ones_like(&scale)?)?; shift.broadcast_add(&x.broadcast_mul(&scale_plus_one)?) } pub struct JointBlock { x_block: DiTBlock, context_block: DiTBlock, num_heads: usize, } impl JointBlock { pub fn new(hidden_size: usize, num_heads: usize, vb: nn::VarBuilder) -> Result<Self> { let x_block = DiTBlock::new(hidden_size, num_heads, vb.pp("x_block"))?; let context_block = DiTBlock::new(hidden_size, num_heads, vb.pp("context_block"))?; Ok(Self { x_block, context_block, num_heads, }) } pub fn forward(&self, context: &Tensor, x: &Tensor, c: &Tensor) -> Result<(Tensor, Tensor)> { let (context_qkv, context_interm) = self.context_block.pre_attention(context, c)?; let (x_qkv, x_interm) = self.x_block.pre_attention(x, c)?; let (context_attn, x_attn) = joint_attn(&context_qkv, &x_qkv, self.num_heads)?; let context_out = self.context_block .post_attention(&context_attn, context, &context_interm)?; let x_out = self.x_block.post_attention(&x_attn, x, &x_interm)?; Ok((context_out, x_out)) } } pub struct ContextQkvOnlyJointBlock { x_block: DiTBlock, context_block: QkvOnlyDiTBlock, num_heads: usize, } impl ContextQkvOnlyJointBlock { pub fn new(hidden_size: usize, num_heads: usize, vb: nn::VarBuilder) -> Result<Self> { let x_block = DiTBlock::new(hidden_size, num_heads, vb.pp("x_block"))?; let context_block = QkvOnlyDiTBlock::new(hidden_size, num_heads, vb.pp("context_block"))?; Ok(Self { x_block, context_block, num_heads, }) } pub fn forward(&self, context: &Tensor, x: &Tensor, c: &Tensor) -> Result<Tensor> { let context_qkv = self.context_block.pre_attention(context, c)?; let (x_qkv, x_interm) = self.x_block.pre_attention(x, c)?; let (_, x_attn) = joint_attn(&context_qkv, &x_qkv, self.num_heads)?; let x_out = self.x_block.post_attention(&x_attn, x, &x_interm)?; Ok(x_out) } } // A QKV-attention that is compatible with the interface of candle_flash_attn::flash_attn // Flash attention regards q, k, v dimensions as (batch_size, seqlen, nheads, headdim) fn flash_compatible_attention( q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32, ) -> Result<Tensor> { let q_dims_for_matmul = q.transpose(1, 2)?.dims().to_vec(); let rank = q_dims_for_matmul.len(); let q = q.transpose(1, 2)?.flatten_to(rank - 3)?; let k = k.transpose(1, 2)?.flatten_to(rank - 3)?; let v = v.transpose(1, 2)?.flatten_to(rank - 3)?; let attn_weights = (q.matmul(&k.t()?)? 
* softmax_scale as f64)?; let attn_scores = candle_nn::ops::softmax_last_dim(&attn_weights)?.matmul(&v)?; attn_scores.reshape(q_dims_for_matmul)?.transpose(1, 2) } fn joint_attn(context_qkv: &Qkv, x_qkv: &Qkv, num_heads: usize) -> Result<(Tensor, Tensor)> { let qkv = Qkv { q: Tensor::cat(&[&context_qkv.q, &x_qkv.q], 1)?, k: Tensor::cat(&[&context_qkv.k, &x_qkv.k], 1)?, v: Tensor::cat(&[&context_qkv.v, &x_qkv.v], 1)?, }; let (batch_size, seqlen, _) = qkv.q.dims3()?; let qkv = Qkv { q: qkv.q.reshape((batch_size, seqlen, num_heads, ()))?, k: qkv.k.reshape((batch_size, seqlen, num_heads, ()))?, v: qkv.v, }; let headdim = qkv.q.dim(D::Minus1)?; let softmax_scale = 1.0 / (headdim as f64).sqrt(); // let attn: Tensor = candle_flash_attn::flash_attn(&qkv.q, &qkv.k, &qkv.v, softmax_scale as f32, false)?; let attn = flash_compatible_attention(&qkv.q, &qkv.k, &qkv.v, softmax_scale as f32)?; let attn = attn.reshape((batch_size, seqlen, ()))?; let context_qkv_seqlen = context_qkv.q.dim(1)?; let context_attn = attn.narrow(1, 0, context_qkv_seqlen)?; let x_attn = attn.narrow(1, context_qkv_seqlen, seqlen - context_qkv_seqlen)?; Ok((context_attn, x_attn)) }
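// A minimal sketch (not part of the upstream file) of what `modulate`
// computes: x * (1 + scale) + shift, with shift/scale broadcast over the
// sequence dimension after `unsqueeze(1)`.
#[cfg(test)]
mod modulate_sketch_tests {
    use super::*;
    use candle::{DType, Device};

    #[test]
    fn modulate_broadcasts_over_seq() -> Result<()> {
        let dev = Device::Cpu;
        // x: (batch=1, seq=2, dim=3); shift/scale: (batch=1, dim=3).
        let x = Tensor::ones((1, 2, 3), DType::F32, &dev)?;
        let shift = Tensor::zeros((1, 3), DType::F32, &dev)?;
        let scale = Tensor::ones((1, 3), DType::F32, &dev)?;
        // (1 + 1) * 1 + 0 = 2 at every position.
        let y = modulate(&x, &shift, &scale)?;
        assert_eq!(y.flatten_all()?.to_vec1::<f32>()?, vec![2f32; 6]);
        Ok(())
    }
}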
candle/candle-transformers/src/models/mmdit/blocks.rs/0
{ "file_path": "candle/candle-transformers/src/models/mmdit/blocks.rs", "repo_id": "candle", "token_count": 4866 }
42
use crate::models::with_tracing::{layer_norm, linear, Embedding, LayerNorm, Linear}; /// Phi model. /// https://huggingface.co/microsoft/phi-2 /// There is an alternative implementation of the phi model in mixformers.rs. /// This corresponds to the model update made with the following commit: /// https://huggingface.co/microsoft/phi-2/commit/cb2f4533604d8b67de604e7df03bfe6f3ca22869 use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use candle_nn::{Activation, VarBuilder}; use serde::Deserialize; // https://huggingface.co/microsoft/phi-2/blob/main/configuration_phi.py #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { pub(crate) vocab_size: usize, pub(crate) hidden_size: usize, pub(crate) intermediate_size: usize, pub(crate) num_hidden_layers: usize, pub(crate) num_attention_heads: usize, pub(crate) num_key_value_heads: Option<usize>, pub(crate) hidden_act: Activation, pub(crate) max_position_embeddings: usize, pub(crate) layer_norm_eps: f64, pub(crate) tie_word_embeddings: bool, pub(crate) rope_theta: f32, pub(crate) partial_rotary_factor: f64, pub(crate) qk_layernorm: bool, } impl Config { fn num_key_value_heads(&self) -> usize { self.num_key_value_heads.unwrap_or(self.num_attention_heads) } fn head_dim(&self) -> usize { self.hidden_size / self.num_attention_heads } } #[derive(Debug, Clone)] struct RotaryEmbedding { dim: usize, sin: Tensor, cos: Tensor, } impl RotaryEmbedding { fn new(cfg: &Config, dev: &Device) -> Result<Self> { let dim = (cfg.partial_rotary_factor * cfg.head_dim() as f64) as usize; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / cfg.rope_theta.powf(i as f32 / dim as f32)) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?; let t = Tensor::arange(0u32, cfg.max_position_embeddings as u32, dev)? .to_dtype(DType::F32)? .reshape((cfg.max_position_embeddings, 1))?; let freqs = t.matmul(&inv_freq)?; Ok(Self { dim, sin: freqs.sin()?, cos: freqs.cos()?, }) } fn apply_rotary_emb(&self, xs: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (_b_size, _num_heads, seq_len, _headdim) = xs.dims4()?; let xs_rot = xs.i((.., .., .., ..self.dim))?.contiguous()?; let xs_pass = xs.i((.., .., .., self.dim..))?; let c = self.cos.narrow(0, seqlen_offset, seq_len)?; let s = self.sin.narrow(0, seqlen_offset, seq_len)?; let xs_rot = candle_nn::rotary_emb::rope(&xs_rot, &c, &s)?; Tensor::cat(&[&xs_rot, &xs_pass], D::Minus1) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { fc1: Linear, fc2: Linear, act: Activation, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let fc1 = linear(cfg.hidden_size, cfg.intermediate_size, vb.pp("fc1"))?; let fc2 = linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("fc2"))?; Ok(Self { fc1, fc2, // This does not match the mixformers implementation where Gelu is used rather than // GeluNew. 
act: cfg.hidden_act, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.fc1)?.apply(&self.act)?.apply(&self.fc2) } } #[derive(Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, dense: Linear, kv_cache: Option<(Tensor, Tensor)>, q_layernorm: Option<LayerNorm>, k_layernorm: Option<LayerNorm>, rotary_emb: RotaryEmbedding, softmax_scale: f64, num_heads: usize, num_kv_heads: usize, head_dim: usize, span: tracing::Span, } fn get_mask(size: usize, device: &Device) -> Result<Tensor> { let mask: Vec<_> = (0..size) .flat_map(|i| (0..size).map(move |j| u8::from(j > i))) .collect(); Tensor::from_slice(&mask, (size, size), device) } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true, on_false)?; Ok(m) } impl Attention { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads(); let head_dim = cfg.head_dim(); let q_proj = linear(cfg.hidden_size, num_heads * head_dim, vb.pp("q_proj"))?; let k_proj = linear(cfg.hidden_size, num_kv_heads * head_dim, vb.pp("k_proj"))?; let v_proj = linear(cfg.hidden_size, num_kv_heads * head_dim, vb.pp("v_proj"))?; let dense = linear(num_heads * head_dim, cfg.hidden_size, vb.pp("dense"))?; // Alternative rope scalings are not supported. let rotary_emb = RotaryEmbedding::new(cfg, vb.device())?; let (q_layernorm, k_layernorm) = if cfg.qk_layernorm { let q_layernorm = layer_norm(head_dim, cfg.layer_norm_eps, vb.pp("q_layernorm"))?; let k_layernorm = layer_norm(head_dim, cfg.layer_norm_eps, vb.pp("k_layernorm"))?; (Some(q_layernorm), Some(k_layernorm)) } else { (None, None) }; let softmax_scale = 1f64 / (head_dim as f64).sqrt(); Ok(Self { q_proj, k_proj, v_proj, dense, kv_cache: None, q_layernorm, k_layernorm, rotary_emb, softmax_scale, num_heads, num_kv_heads, head_dim, span: tracing::span!(tracing::Level::TRACE, "attention"), }) } fn repeat_kv(&self, xs: Tensor) -> Result<Tensor> { crate::utils::repeat_kv(xs, self.num_heads / self.num_kv_heads) } fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let (b_size, seq_len, _n_embd) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let query_states = match &self.q_layernorm { None => query_states, Some(ln) => query_states.apply(ln)?, }; let key_states = match &self.k_layernorm { None => key_states, Some(ln) => key_states.apply(ln)?, }; let query_states = query_states .reshape((b_size, seq_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((b_size, seq_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((b_size, seq_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; // Rotary embeddings. let seqlen_offset = match &self.kv_cache { None => 0, Some((prev_k, _)) => prev_k.dim(2)?, }; let query_states = self .rotary_emb .apply_rotary_emb(&query_states, seqlen_offset)?; let key_states = self .rotary_emb .apply_rotary_emb(&key_states, seqlen_offset)?; // KV cache. 
let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let k = Tensor::cat(&[prev_k, &key_states], 2)?; let v = Tensor::cat(&[prev_v, &value_states], 2)?; (k, v) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); // Repeat kv. let key_states = self.repeat_kv(key_states)?.contiguous()?; let value_states = self.repeat_kv(value_states)?.contiguous()?; let attn_weights = (query_states .to_dtype(DType::F32)? .contiguous()? .matmul(&key_states.to_dtype(DType::F32)?.t()?)? * self.softmax_scale)?; let attn_weights = match mask { None => attn_weights, Some(mask) => masked_fill( &attn_weights, &mask.broadcast_left((b_size, self.num_heads))?, f32::NEG_INFINITY, )?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?.to_dtype(value_states.dtype())?; let attn_output = attn_weights.matmul(&value_states)?; let attn_output = attn_output .transpose(1, 2)? .reshape((b_size, seq_len, ()))?; attn_output.apply(&self.dense) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Clone)] struct DecoderLayer { self_attn: Attention, mlp: MLP, input_layernorm: LayerNorm, span: tracing::Span, } impl DecoderLayer { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(cfg, vb.pp("self_attn"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; let input_layernorm = layer_norm( cfg.hidden_size, cfg.layer_norm_eps, vb.pp("input_layernorm"), )?; Ok(Self { self_attn, mlp, input_layernorm, span: tracing::span!(tracing::Level::TRACE, "block"), }) } fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let residual = xs; let xs = xs.apply(&self.input_layernorm)?; let attn_outputs = self.self_attn.forward(&xs, mask)?; let feed_forward_hidden_states = self.mlp.forward(&xs)?; attn_outputs + feed_forward_hidden_states + residual } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache() } } #[derive(Clone)] pub struct Model { embed_tokens: Embedding, layers: Vec<DecoderLayer>, final_layernorm: LayerNorm, lm_head: Linear, span: tracing::Span, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = Embedding::new(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let final_layernorm = layer_norm( cfg.hidden_size, cfg.layer_norm_eps, vb_m.pp("final_layernorm"), )?; let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_m = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(cfg, vb_m.pp(layer_idx))?; layers.push(layer) } let lm_head = linear(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?; Ok(Self { embed_tokens, layers, final_layernorm, lm_head, span: tracing::span!(tracing::Level::TRACE, "model"), }) } pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (_b_size, seq_len) = xs.dims2()?; let mut xs = xs.apply(&self.embed_tokens)?; let mask = if seq_len <= 1 { None } else { Some(get_mask(seq_len, xs.device())?) }; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, mask.as_ref())?; } xs.apply(&self.final_layernorm)? .narrow(1, seq_len - 1, 1)? .apply(&self.lm_head)? .squeeze(1) } pub fn clear_kv_cache(&mut self) { self.layers.iter_mut().for_each(|b| b.clear_kv_cache()) } }
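// A small sketch (not from the upstream file) of the causal mask layout
// produced by `get_mask`: 1 marks future positions, which are filled with
// -inf before the softmax.
#[cfg(test)]
mod causal_mask_sketch_tests {
    use super::*;

    #[test]
    fn mask_is_strictly_upper_triangular() -> Result<()> {
        let mask = get_mask(3, &Device::Cpu)?;
        assert_eq!(
            mask.to_vec2::<u8>()?,
            vec![vec![0, 1, 1], vec![0, 0, 1], vec![0, 0, 0]]
        );
        Ok(())
    }
}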
candle/candle-transformers/src/models/phi.rs/0
{ "file_path": "candle/candle-transformers/src/models/phi.rs", "repo_id": "candle", "token_count": 6064 }
43
// Adapted from: // https://github.com/ChaoningZhang/MobileSAM/blob/master/mobile_sam/modeling/tiny_vit_sam.py use candle::{IndexOp, Result, Tensor, D}; use candle_nn::{Conv2dConfig, Module, VarBuilder}; const MBCONV_EXPAND_RATIO: usize = 4; const MLP_RATIO: usize = 4; const LOCAL_CONV_SIZE: usize = 3; const IMG_SIZE: usize = 1024; const IN_CHANNELS: usize = 3; #[derive(Debug)] struct Conv2dBN { c: candle_nn::Conv2d, bn: candle_nn::BatchNorm, span: tracing::Span, } impl Conv2dBN { fn new(in_: usize, out: usize, ks: usize, cfg: Conv2dConfig, vb: VarBuilder) -> Result<Self> { let c = candle_nn::conv2d_no_bias(in_, out, ks, cfg, vb.pp("c"))?; let bn = candle_nn::batch_norm(out, 1e-5, vb.pp("bn"))?; let span = tracing::span!(tracing::Level::TRACE, "conv2d-bn"); Ok(Self { c, bn, span }) } } impl Module for Conv2dBN { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); xs.apply(&self.c)?.apply_t(&self.bn, false) } } #[derive(Debug)] struct PatchEmbed { conv1: Conv2dBN, conv2: Conv2dBN, span: tracing::Span, } impl PatchEmbed { fn new(in_chans: usize, embed_dim: usize, vb: VarBuilder) -> Result<Self> { let cfg = candle_nn::Conv2dConfig { stride: 2, padding: 1, ..Default::default() }; let conv1 = Conv2dBN::new(in_chans, embed_dim / 2, 3, cfg, vb.pp("seq.0"))?; let conv2 = Conv2dBN::new(embed_dim / 2, embed_dim, 3, cfg, vb.pp("seq.2"))?; let span = tracing::span!(tracing::Level::TRACE, "patch-embed"); Ok(Self { conv1, conv2, span }) } } impl Module for PatchEmbed { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); xs.apply(&self.conv1)?.gelu()?.apply(&self.conv2) } } #[derive(Debug)] struct MBConv { conv1: Conv2dBN, conv2: Conv2dBN, conv3: Conv2dBN, span: tracing::Span, } impl MBConv { fn new(in_: usize, out: usize, expand_ratio: usize, vb: VarBuilder) -> Result<Self> { let hidden = in_ * expand_ratio; let cfg2 = candle_nn::Conv2dConfig { padding: 1, groups: hidden, ..Default::default() }; let conv1 = Conv2dBN::new(in_, hidden, 1, Default::default(), vb.pp("conv1"))?; let conv2 = Conv2dBN::new(hidden, hidden, 3, cfg2, vb.pp("conv2"))?; let conv3 = Conv2dBN::new(hidden, out, 1, Default::default(), vb.pp("conv3"))?; let span = tracing::span!(tracing::Level::TRACE, "mb-conv"); Ok(Self { conv1, conv2, conv3, span, }) } } impl Module for MBConv { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let shortcut = xs; let xs = xs .apply(&self.conv1)? .gelu()? .apply(&self.conv2)? .gelu()? 
.apply(&self.conv3)?; (xs + shortcut)?.gelu() } } #[derive(Debug)] struct PatchMerging { conv1: Conv2dBN, conv2: Conv2dBN, conv3: Conv2dBN, input_resolution: (usize, usize), span: tracing::Span, } impl PatchMerging { fn new( input_resolution: (usize, usize), dim: usize, out: usize, vb: VarBuilder, ) -> Result<Self> { let stride = if [320, 448, 576].contains(&out) { 1 } else { 2 }; let cfg2 = candle_nn::Conv2dConfig { padding: 1, stride, groups: out, ..Default::default() }; let conv1 = Conv2dBN::new(dim, out, 1, Default::default(), vb.pp("conv1"))?; let conv2 = Conv2dBN::new(out, out, 3, cfg2, vb.pp("conv2"))?; let conv3 = Conv2dBN::new(out, out, 1, Default::default(), vb.pp("conv3"))?; let span = tracing::span!(tracing::Level::TRACE, "patch-merging"); Ok(Self { conv1, conv2, conv3, input_resolution, span, }) } } impl Module for PatchMerging { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let xs = if xs.rank() == 3 { let (h, w) = self.input_resolution; let b = xs.dim(0)?; xs.reshape((b, h, w, ()))?.permute((0, 3, 1, 2))? } else { xs.clone() }; xs.apply(&self.conv1)? .gelu()? .apply(&self.conv2)? .gelu()? .apply(&self.conv3)? .flatten_from(2)? .transpose(1, 2) } } #[derive(Debug)] struct ConvLayer { blocks: Vec<MBConv>, downsample: Option<PatchMerging>, span: tracing::Span, } impl ConvLayer { fn new( dim: usize, out: usize, input_resolution: (usize, usize), depth: usize, downsample: bool, conv_expand_ratio: usize, vb: VarBuilder, ) -> Result<Self> { let vb_b = vb.pp("blocks"); let mut blocks = Vec::with_capacity(depth); for index in 0..depth { let block = MBConv::new(dim, dim, conv_expand_ratio, vb_b.pp(index))?; blocks.push(block) } let downsample = if downsample { let downsample = PatchMerging::new(input_resolution, dim, out, vb.pp("downsample"))?; Some(downsample) } else { None }; let span = tracing::span!(tracing::Level::TRACE, "conv-layer"); Ok(Self { blocks, downsample, span, }) } } impl Module for ConvLayer { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = xs.clone(); for block in self.blocks.iter() { xs = block.forward(&xs)? } match &self.downsample { None => Ok(xs), Some(downsample) => downsample.forward(&xs), } } } #[derive(Debug)] struct Mlp { norm: candle_nn::LayerNorm, fc1: super::Linear, fc2: super::Linear, span: tracing::Span, } impl Mlp { fn new(in_: usize, hidden: usize, vb: VarBuilder) -> Result<Self> { let norm = candle_nn::layer_norm(in_, 1e-5, vb.pp("norm"))?; let fc1 = super::linear(vb.pp("fc1"), in_, hidden, true)?; let fc2 = super::linear(vb.pp("fc2"), hidden, in_, true)?; let span = tracing::span!(tracing::Level::TRACE, "mlp"); Ok(Self { norm, fc1, fc2, span, }) } } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); xs.apply(&self.norm)? .apply(&self.fc1)? .gelu()? 
.apply(&self.fc2) } } #[derive(Debug)] struct Attention { norm: candle_nn::LayerNorm, qkv: super::Linear, proj: super::Linear, ab: Tensor, key_dim: usize, num_heads: usize, d: usize, dh: usize, scale: f64, span: tracing::Span, span_matmul: tracing::Span, span_softmax: tracing::Span, } impl Attention { fn new( dim: usize, key_dim: usize, num_heads: usize, attn_ratio: usize, resolution: (usize, usize), vb: VarBuilder, ) -> Result<Self> { let d = attn_ratio * key_dim; let dh = d * num_heads; let nh_kd = key_dim * num_heads; let h = dh + nh_kd * 2; let norm = candle_nn::layer_norm(dim, 1e-5, vb.pp("norm"))?; let qkv = super::linear(vb.pp("qkv"), dim, h, true)?; let proj = super::linear(vb.pp("proj"), dh, dim, true)?; let points = (0..resolution.0) .flat_map(|x| (0..resolution.1).map(move |y| (x as i64, y as i64))) .collect::<Vec<_>>(); let mut idxs = Vec::with_capacity(points.len() * points.len()); let mut attention_offsets = std::collections::HashMap::new(); for &(x1, y1) in points.iter() { for &(x2, y2) in points.iter() { let offset = ((x2 - x1).abs(), (y2 - y1).abs()); let l = attention_offsets.len(); let idx = attention_offsets.entry(offset).or_insert(l); idxs.push(*idx as u32) } } let attention_biases = vb.get((num_heads, attention_offsets.len()), "attention_biases")?; let idxs = Tensor::new(idxs, attention_biases.device())?; let ab = attention_biases .index_select(&idxs, 1)? .reshape(((), points.len(), points.len()))?; let span = tracing::span!(tracing::Level::TRACE, "attention"); let span_matmul = tracing::span!(tracing::Level::TRACE, "attn-matmul"); let span_softmax = tracing::span!(tracing::Level::TRACE, "attn-sm"); Ok(Self { norm, qkv, proj, ab, key_dim, num_heads, d, dh, scale: 1f64 / (key_dim as f64).sqrt(), span, span_matmul, span_softmax, }) } } impl Module for Attention { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (b, n, _) = xs.dims3()?; let xs = xs.apply(&self.norm)?; let qkv = xs.apply(&self.qkv)?.reshape((b, n, self.num_heads, ()))?; let q = qkv .narrow(D::Minus1, 0, self.key_dim)? .permute((0, 2, 1, 3))? .contiguous()?; let k = qkv .narrow(D::Minus1, self.key_dim, self.key_dim)? .permute((0, 2, 1, 3))? .contiguous()?; let v = qkv .narrow(D::Minus1, 2 * self.key_dim, self.d)? .permute((0, 2, 1, 3))? .contiguous()?; let attn = { let _enter = self.span_matmul.enter(); (q.matmul(&k.t()?)? * self.scale)? }; let attn = attn.broadcast_add(&self.ab)?; let attn = { let _enter = self.span_softmax.enter(); candle_nn::ops::softmax_last_dim(&attn)? }; let attn = { let _enter = self.span_matmul.enter(); attn.matmul(&v)? }; attn.transpose(1, 2)? .reshape((b, n, self.dh))? 
.apply(&self.proj) } } #[derive(Debug)] struct TinyViTBlock { attn: Attention, local_conv: Conv2dBN, mlp: Mlp, window_size: usize, input_resolution: (usize, usize), span: tracing::Span, } impl TinyViTBlock { fn new( dim: usize, input_resolution: (usize, usize), num_heads: usize, window_size: usize, vb: VarBuilder, ) -> Result<Self> { let head_dim = dim / num_heads; let attn = Attention::new( dim, head_dim, num_heads, 1, (window_size, window_size), vb.pp("attn"), )?; let mlp = Mlp::new(dim, dim * MLP_RATIO, vb.pp("mlp"))?; let cfg = candle_nn::Conv2dConfig { padding: LOCAL_CONV_SIZE / 2, groups: dim, ..Default::default() }; let local_conv = Conv2dBN::new(dim, dim, LOCAL_CONV_SIZE, cfg, vb.pp("local_conv"))?; let span = tracing::span!(tracing::Level::TRACE, "attention"); Ok(Self { attn, local_conv, mlp, window_size, input_resolution, span, }) } } impl Module for TinyViTBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (h, w) = self.input_resolution; let (b, l, c) = xs.dims3()?; let res_x = xs; let xs = if h == self.window_size && w == self.window_size { self.attn.forward(xs)? } else { let xs = xs.reshape((b, h, w, c))?; let pad_b = (self.window_size - h % self.window_size) % self.window_size; let pad_r = (self.window_size - w % self.window_size) % self.window_size; let xs = if pad_b > 0 { xs.pad_with_zeros(1, 0, pad_b)? } else { xs }; let xs = if pad_r > 0 { xs.pad_with_zeros(2, 0, pad_r)? } else { xs }; let (p_h, p_w) = (h + pad_b, w + pad_r); let n_h = p_h / self.window_size; let n_w = p_w / self.window_size; let xs = xs .reshape((b, n_h, self.window_size, n_w, self.window_size, c))? .transpose(2, 3)? .reshape((b * n_h * n_w, self.window_size * self.window_size, c))?; let xs = self.attn.forward(&xs)?; let xs = xs .reshape((b, n_h, n_w, self.window_size, self.window_size, c))? .transpose(2, 3)? .reshape((b, p_h, p_w, c))?; let xs = if pad_r > 0 { xs.i((.., .., ..w))?.contiguous()? } else { xs }; let xs = if pad_b > 0 { xs.i((.., ..h, ..))?.contiguous()? } else { xs }; xs.reshape((b, l, c))? }; let xs = (xs + res_x)?; let xs = xs .transpose(1, 2)? .reshape((b, c, h, w))? .apply(&self.local_conv)? .reshape((b, c, l))? .transpose(1, 2)?; &xs + self.mlp.forward(&xs)? } } #[derive(Debug)] struct BasicLayer { blocks: Vec<TinyViTBlock>, downsample: Option<PatchMerging>, span: tracing::Span, } impl BasicLayer { #[allow(clippy::too_many_arguments)] fn new( dim: usize, input_resolution: (usize, usize), depth: usize, num_heads: usize, window_size: usize, downsample: bool, out: usize, vb: VarBuilder, ) -> Result<Self> { let vb_b = vb.pp("blocks"); let mut blocks = Vec::with_capacity(depth); for index in 0..depth { let block = TinyViTBlock::new( dim, input_resolution, num_heads, window_size, vb_b.pp(index), )?; blocks.push(block) } let downsample = if downsample { let downsample = PatchMerging::new(input_resolution, dim, out, vb.pp("downsample"))?; Some(downsample) } else { None }; let span = tracing::span!(tracing::Level::TRACE, "basic-layer"); Ok(Self { blocks, downsample, span, }) } } impl Module for BasicLayer { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = xs.clone(); for block in self.blocks.iter() { xs = block.forward(&xs)? 
} match &self.downsample { None => Ok(xs), Some(downsample) => downsample.forward(&xs), } } } #[derive(Debug)] pub struct TinyViT { patch_embed: PatchEmbed, layer0: ConvLayer, layers: Vec<BasicLayer>, // norm_head: candle_nn::LayerNorm, // head: candle_nn::Linear, neck_conv1: candle_nn::Conv2d, neck_ln1: super::LayerNorm2d, neck_conv2: candle_nn::Conv2d, neck_ln2: super::LayerNorm2d, span: tracing::Span, span_neck: tracing::Span, } impl TinyViT { pub fn new( embed_dims: &[usize], depths: &[usize], num_heads: &[usize], window_sizes: &[usize], _num_classes: usize, vb: VarBuilder, ) -> Result<Self> { let patch_embed = PatchEmbed::new(IN_CHANNELS, embed_dims[0], vb.pp("patch_embed"))?; let patches_resolution = IMG_SIZE / 4; let vb_l = vb.pp("layers"); let layer0 = ConvLayer::new( /* dim */ embed_dims[0], /* out */ embed_dims[1], /* input_resolution */ (patches_resolution, patches_resolution), /* depth */ depths[0], /* downsample */ true, /* conv_expand_ratio */ MBCONV_EXPAND_RATIO, vb_l.pp(0), )?; let num_layers = embed_dims.len(); let mut layers = Vec::with_capacity(num_layers - 1); for i_layer in 1..num_layers { let patches_resolution = patches_resolution / (1 << usize::min(i_layer, 2)); let layer = BasicLayer::new( /* dim */ embed_dims[i_layer], /* input_resolution */ (patches_resolution, patches_resolution), /* depth */ depths[i_layer], /* num_heads */ num_heads[i_layer], /* window_size */ window_sizes[i_layer], /* downsample */ i_layer < num_layers - 1, /* out */ embed_dims[usize::min(i_layer + 1, num_layers - 1)], vb_l.pp(i_layer), )?; layers.push(layer) } let last_embed_dim = embed_dims[embed_dims.len() - 1]; // let norm_head = candle_nn::layer_norm(last_embed_dim, 1e-5, vb.pp("norm_head"))?; // let head = candle_nn::linear(last_embed_dim, num_classes, vb.pp("head"))?; let neck_conv1 = candle_nn::conv2d_no_bias(last_embed_dim, 256, 1, Default::default(), vb.pp("neck.0"))?; let neck_ln1 = super::LayerNorm2d::new(256, 1e-6, vb.pp("neck.1"))?; let cfg = candle_nn::Conv2dConfig { padding: 1, ..Default::default() }; let neck_conv2 = candle_nn::conv2d_no_bias(256, 256, 3, cfg, vb.pp("neck.2"))?; let neck_ln2 = super::LayerNorm2d::new(256, 1e-6, vb.pp("neck.3"))?; let span = tracing::span!(tracing::Level::TRACE, "tiny-vit"); let span_neck = tracing::span!(tracing::Level::TRACE, "neck"); Ok(Self { patch_embed, layer0, layers, neck_conv1, neck_ln1, neck_conv2, neck_ln2, span, span_neck, }) } } impl Module for TinyViT { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let xs = self.patch_embed.forward(xs)?; let mut xs = self.layer0.forward(&xs)?; for layer in self.layers.iter() { xs = layer.forward(&xs)? } let (b, _, c) = xs.dims3()?; let _enter = self.span_neck.enter(); xs.reshape((b, 64, 64, c))? .permute((0, 3, 1, 2))? .apply(&self.neck_conv1)? .apply(&self.neck_ln1)? .apply(&self.neck_conv2)? .apply(&self.neck_ln2) } } pub fn tiny_vit_5m(vb: VarBuilder) -> Result<TinyViT> { TinyViT::new( /* embed_dims */ &[64, 128, 160, 320], /* depths */ &[2, 2, 6, 2], /* num_heads */ &[2, 4, 5, 10], /* window_sizes */ &[7, 7, 14, 7], /* num_classes */ 1000, vb, ) }
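// Usage sketch (hypothetical file name; assumes MobileSAM TinyViT-5M weights
// whose tensor names match the `patch_embed`/`layers`/`neck` prefixes above):
//
//     let vb = unsafe {
//         VarBuilder::from_mmaped_safetensors(&["mobile_sam.safetensors"], DType::F32, &dev)?
//     };
//     let vit = tiny_vit_5m(vb)?;
//     // 1024x1024 RGB input -> (batch, 256, 64, 64) image embeddings for SAM.
//     let embeddings = vit.forward(&images)?;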
candle/candle-transformers/src/models/segment_anything/tiny_vit.rs/0
{ "file_path": "candle/candle-transformers/src/models/segment_anything/tiny_vit.rs", "repo_id": "candle", "token_count": 10372 }
44
use super::common::{AttnBlock, ResBlock, TimestepBlock};
use candle::{DType, Result, Tensor, D};
use candle_nn::VarBuilder;

#[derive(Debug)]
struct Block {
    res_block: ResBlock,
    ts_block: TimestepBlock,
    attn_block: AttnBlock,
}

#[derive(Debug)]
pub struct WPrior {
    projection: candle_nn::Conv2d,
    cond_mapper_lin1: candle_nn::Linear,
    cond_mapper_lin2: candle_nn::Linear,
    blocks: Vec<Block>,
    out_ln: super::common::WLayerNorm,
    out_conv: candle_nn::Conv2d,
    c_r: usize,
}

impl WPrior {
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        c_in: usize,
        c: usize,
        c_cond: usize,
        c_r: usize,
        depth: usize,
        nhead: usize,
        use_flash_attn: bool,
        vb: VarBuilder,
    ) -> Result<Self> {
        let projection = candle_nn::conv2d(c_in, c, 1, Default::default(), vb.pp("projection"))?;
        let cond_mapper_lin1 = candle_nn::linear(c_cond, c, vb.pp("cond_mapper.0"))?;
        let cond_mapper_lin2 = candle_nn::linear(c, c, vb.pp("cond_mapper.2"))?;
        let out_ln = super::common::WLayerNorm::new(c)?;
        let out_conv = candle_nn::conv2d(c, c_in * 2, 1, Default::default(), vb.pp("out.1"))?;
        let mut blocks = Vec::with_capacity(depth);
        for index in 0..depth {
            let res_block = ResBlock::new(c, 0, 3, vb.pp(format!("blocks.{}", 3 * index)))?;
            let ts_block = TimestepBlock::new(c, c_r, vb.pp(format!("blocks.{}", 3 * index + 1)))?;
            let attn_block = AttnBlock::new(
                c,
                c,
                nhead,
                true,
                use_flash_attn,
                vb.pp(format!("blocks.{}", 3 * index + 2)),
            )?;
            blocks.push(Block {
                res_block,
                ts_block,
                attn_block,
            })
        }
        Ok(Self {
            projection,
            cond_mapper_lin1,
            cond_mapper_lin2,
            blocks,
            out_ln,
            out_conv,
            c_r,
        })
    }

    pub fn gen_r_embedding(&self, r: &Tensor) -> Result<Tensor> {
        const MAX_POSITIONS: usize = 10000;
        let r = (r * MAX_POSITIONS as f64)?;
        let half_dim = self.c_r / 2;
        let emb = (MAX_POSITIONS as f64).ln() / (half_dim - 1) as f64;
        let emb = (Tensor::arange(0u32, half_dim as u32, r.device())?.to_dtype(DType::F32)? * -emb)?
            .exp()?;
        let emb = r.unsqueeze(1)?.broadcast_mul(&emb.unsqueeze(0)?)?;
        let emb = Tensor::cat(&[emb.sin()?, emb.cos()?], 1)?;
        let emb = if self.c_r % 2 == 1 {
            emb.pad_with_zeros(D::Minus1, 0, 1)?
        } else {
            emb
        };
        emb.to_dtype(r.dtype())
    }

    pub fn forward(&self, xs: &Tensor, r: &Tensor, c: &Tensor) -> Result<Tensor> {
        let x_in = xs;
        let mut xs = xs.apply(&self.projection)?;
        let c_embed = c
            .apply(&self.cond_mapper_lin1)?
            .apply(&|xs: &_| candle_nn::ops::leaky_relu(xs, 0.2))?
            .apply(&self.cond_mapper_lin2)?;
        let r_embed = self.gen_r_embedding(r)?;
        for block in self.blocks.iter() {
            xs = block.res_block.forward(&xs, None)?;
            xs = block.ts_block.forward(&xs, &r_embed)?;
            xs = block.attn_block.forward(&xs, &c_embed)?;
        }
        let ab = xs.apply(&self.out_ln)?.apply(&self.out_conv)?.chunk(2, 1)?;
        (x_in - &ab[0])? / ((&ab[1] - 1.)?.abs()? + 1e-5)
    }
}
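// Note (sketch, not from the upstream file): `gen_r_embedding` is the usual
// transformer sinusoidal embedding applied to the diffusion ratio `r`:
// `r` is scaled by 10_000, multiplied by `c_r / 2` geometrically spaced
// frequencies, and the sin/cos halves are concatenated (with one zero of
// padding when `c_r` is odd) to produce a (batch, c_r) conditioning vector.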
candle/candle-transformers/src/models/wuerstchen/prior.rs/0
{ "file_path": "candle/candle-transformers/src/models/wuerstchen/prior.rs", "repo_id": "candle", "token_count": 1920 }
45
use candle_transformers::models::bert;
use wasm_bindgen::prelude::*;

pub use bert::{BertModel, Config, DTYPE};
pub use tokenizers::{PaddingParams, Tokenizer};

#[wasm_bindgen]
extern "C" {
    // Use `js_namespace` here to bind `console.log(..)` instead of just
    // `log(..)`
    #[wasm_bindgen(js_namespace = console)]
    pub fn log(s: &str);
}

#[macro_export]
macro_rules! console_log {
    // Note that this is using the `log` function imported above during
    // `bare_bones`
    ($($t:tt)*) => ($crate::log(&format_args!($($t)*).to_string()))
}
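// Usage sketch: `console_log!` forwards `format!`-style arguments to the
// browser console, e.g.:
//
//     console_log!("loaded {} weight bytes", model.len());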
candle/candle-wasm-examples/bert/src/lib.rs/0
{ "file_path": "candle/candle-wasm-examples/bert/src/lib.rs", "repo_id": "candle", "token_count": 226 }
46
use crate::console_log; use crate::worker::{ModelData, Worker, WorkerInput, WorkerOutput}; use std::str::FromStr; use wasm_bindgen::prelude::*; use wasm_bindgen_futures::JsFuture; use yew::{html, Component, Context, Html}; use yew_agent::{Bridge, Bridged}; async fn fetch_url(url: &str) -> Result<Vec<u8>, JsValue> { use web_sys::{Request, RequestCache, RequestInit, RequestMode, Response}; let window = web_sys::window().ok_or("window")?; let opts = RequestInit::new(); opts.set_method("GET"); opts.set_mode(RequestMode::Cors); opts.set_cache(RequestCache::NoCache); let request = Request::new_with_str_and_init(url, &opts)?; let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?; // `resp_value` is a `Response` object. assert!(resp_value.is_instance_of::<Response>()); let resp: Response = resp_value.dyn_into()?; let data = JsFuture::from(resp.blob()?).await?; let blob = web_sys::Blob::from(data); let array_buffer = JsFuture::from(blob.array_buffer()).await?; let data = js_sys::Uint8Array::new(&array_buffer).to_vec(); Ok(data) } pub enum Msg { Refresh, Run, UpdateStatus(String), SetModel(ModelData), WorkerIn(WorkerInput), WorkerOut(Result<WorkerOutput, String>), } pub struct CurrentDecode { start_time: Option<f64>, } pub struct App { status: String, loaded: bool, temperature: std::rc::Rc<std::cell::RefCell<f64>>, top_p: std::rc::Rc<std::cell::RefCell<f64>>, prompt: std::rc::Rc<std::cell::RefCell<String>>, generated: String, n_tokens: usize, current_decode: Option<CurrentDecode>, worker: Box<dyn Bridge<Worker>>, } async fn model_data_load() -> Result<ModelData, JsValue> { let tokenizer = fetch_url("tokenizer.json").await?; let model = fetch_url("model.bin").await?; console_log!("{}", model.len()); Ok(ModelData { tokenizer, model }) } fn performance_now() -> Option<f64> { let window = web_sys::window()?; let performance = window.performance()?; Some(performance.now() / 1000.) 
} impl Component for App { type Message = Msg; type Properties = (); fn create(ctx: &Context<Self>) -> Self { let status = "loading weights".to_string(); let cb = { let link = ctx.link().clone(); move |e| link.send_message(Self::Message::WorkerOut(e)) }; let worker = Worker::bridge(std::rc::Rc::new(cb)); Self { status, n_tokens: 0, temperature: std::rc::Rc::new(std::cell::RefCell::new(0.)), top_p: std::rc::Rc::new(std::cell::RefCell::new(1.0)), prompt: std::rc::Rc::new(std::cell::RefCell::new("".to_string())), generated: String::new(), current_decode: None, worker, loaded: false, } } fn rendered(&mut self, ctx: &Context<Self>, first_render: bool) { if first_render { ctx.link().send_future(async { match model_data_load().await { Err(err) => { let status = format!("{err:?}"); Msg::UpdateStatus(status) } Ok(model_data) => Msg::SetModel(model_data), } }); } } fn update(&mut self, ctx: &Context<Self>, msg: Self::Message) -> bool { match msg { Msg::SetModel(md) => { self.status = "weights loaded successfully!".to_string(); self.loaded = true; console_log!("loaded weights"); self.worker.send(WorkerInput::ModelData(md)); true } Msg::Run => { if self.current_decode.is_some() { self.status = "already generating some sample at the moment".to_string() } else { let start_time = performance_now(); self.current_decode = Some(CurrentDecode { start_time }); self.status = "generating...".to_string(); self.n_tokens = 0; self.generated.clear(); let temp = *self.temperature.borrow(); let top_p = *self.top_p.borrow(); let prompt = self.prompt.borrow().clone(); console_log!("temp: {}, top_p: {}, prompt: {}", temp, top_p, prompt); ctx.link() .send_message(Msg::WorkerIn(WorkerInput::Run(temp, top_p, prompt))) } true } Msg::WorkerOut(output) => { match output { Ok(WorkerOutput::WeightsLoaded) => self.status = "weights loaded!".to_string(), Ok(WorkerOutput::GenerationDone(Err(err))) => { self.status = format!("error in worker process: {err}"); self.current_decode = None } Ok(WorkerOutput::GenerationDone(Ok(()))) => { let dt = self.current_decode.as_ref().and_then(|current_decode| { current_decode.start_time.and_then(|start_time| { performance_now().map(|stop_time| stop_time - start_time) }) }); self.status = match dt { None => "generation succeeded!".to_string(), Some(dt) => format!( "generation succeeded in {:.2}s ({:.1} ms/token)", dt, dt * 1000.0 / (self.n_tokens as f64) ), }; self.current_decode = None } Ok(WorkerOutput::Generated(token)) => { self.n_tokens += 1; self.generated.push_str(&token) } Err(err) => { self.status = format!("error in worker {err:?}"); } } true } Msg::WorkerIn(inp) => { self.worker.send(inp); true } Msg::UpdateStatus(status) => { self.status = status; true } Msg::Refresh => true, } } fn view(&self, ctx: &Context<Self>) -> Html { use yew::TargetCast; let temperature = self.temperature.clone(); let oninput_temperature = ctx.link().callback(move |e: yew::InputEvent| { let input: web_sys::HtmlInputElement = e.target_unchecked_into(); if let Ok(temp) = f64::from_str(&input.value()) { *temperature.borrow_mut() = temp } Msg::Refresh }); let top_p = self.top_p.clone(); let oninput_top_p = ctx.link().callback(move |e: yew::InputEvent| { let input: web_sys::HtmlInputElement = e.target_unchecked_into(); if let Ok(top_p_input) = f64::from_str(&input.value()) { *top_p.borrow_mut() = top_p_input } Msg::Refresh }); let prompt = self.prompt.clone(); let oninput_prompt = ctx.link().callback(move |e: yew::InputEvent| { let input: web_sys::HtmlInputElement = e.target_unchecked_into(); *prompt.borrow_mut() = 
input.value(); Msg::Refresh }); html! { <div style="margin: 2%;"> <div><p>{"Running "} <a href="https://github.com/karpathy/llama2.c" target="_blank">{"llama2.c"}</a> {" in the browser using rust/wasm with "} <a href="https://github.com/huggingface/candle" target="_blank">{"candle!"}</a> </p> <p>{"Once the weights have loaded, click on the run button to start generating content."} </p> </div> {"temperature \u{00a0} "} <input type="range" min="0." max="1.2" step="0.1" value={self.temperature.borrow().to_string()} oninput={oninput_temperature} id="temp"/> {format!(" \u{00a0} {}", self.temperature.borrow())} <br/ > {"top_p \u{00a0} "} <input type="range" min="0." max="1.0" step="0.05" value={self.top_p.borrow().to_string()} oninput={oninput_top_p} id="top_p"/> {format!(" \u{00a0} {}", self.top_p.borrow())} <br/ > {"prompt: "}<input type="text" value={self.prompt.borrow().to_string()} oninput={oninput_prompt} id="prompt"/> <br/ > { if self.loaded{ html!(<button class="button" onclick={ctx.link().callback(move |_| Msg::Run)}> { "run" }</button>) }else{ html! { <progress id="progress-bar" aria-label="Loading weights..."></progress> } } } <br/ > <h3> {&self.status} </h3> { if self.current_decode.is_some() { html! { <progress id="progress-bar" aria-label="generating…"></progress> } } else { html! {} } } <blockquote> <p> { self.generated.chars().map(|c| if c == '\r' || c == '\n' { html! { <br/> } } else { html! { {c} } }).collect::<Html>() } </p> </blockquote> </div> } } }
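// Message-flow sketch (summary, not part of the upstream file):
// - `App` sends work to the web worker via `Msg::WorkerIn(WorkerInput::Run(temp, top_p, prompt))`.
// - The worker replies through the bridge callback, which is mapped to `Msg::WorkerOut(..)`.
// - Each `WorkerOutput::Generated(token)` appends to `self.generated`, and the
//   final `GenerationDone` uses `performance.now()` deltas to report ms/token.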
candle/candle-wasm-examples/llama2-c/src/app.rs/0
{ "file_path": "candle/candle-wasm-examples/llama2-c/src/app.rs", "repo_id": "candle", "token_count": 5448 }
47
// Load the Candle T5 encoder wasm module.
let init, ModelEncoder;

async function fetchArrayBuffer(url) {
  const cacheName = "t5-candle-cache";
  const cache = await caches.open(cacheName);
  const cachedResponse = await cache.match(url);
  if (cachedResponse) {
    const data = await cachedResponse.arrayBuffer();
    return new Uint8Array(data);
  }
  const res = await fetch(url, { cache: "force-cache" });
  cache.put(url, res.clone());
  return new Uint8Array(await res.arrayBuffer());
}

class Encoder {
  static instance = {};

  static async getInstance(weightsURL, tokenizerURL, configURL, modelID) {
    if (modelID.includes("quantized")) {
      ({ default: init, ModelEncoder } = await import("./build/m-quantized.js"));
    } else {
      ({ default: init, ModelEncoder } = await import("./build/m.js"));
    }
    if (!this.instance[modelID]) {
      await init();
      self.postMessage({ status: "loading", message: "Loading Model" });
      const [weightsArrayU8, tokenizerArrayU8, configArrayU8] = await Promise.all([
        fetchArrayBuffer(weightsURL),
        fetchArrayBuffer(tokenizerURL),
        fetchArrayBuffer(configURL),
      ]);
      this.instance[modelID] = new ModelEncoder(
        weightsArrayU8,
        tokenizerArrayU8,
        configArrayU8
      );
    } else {
      self.postMessage({ status: "ready", message: "Model Already Loaded" });
    }
    return this.instance[modelID];
  }
}

self.addEventListener("message", async (event) => {
  const {
    weightsURL,
    tokenizerURL,
    configURL,
    modelID,
    sentences,
    normalize_embeddings,
  } = event.data;
  try {
    self.postMessage({ status: "ready", message: "Starting T5 Encoder" });
    const model = await Encoder.getInstance(
      weightsURL,
      tokenizerURL,
      configURL,
      modelID
    );
    self.postMessage({
      status: "encoding",
      message: "Encoding Sentences",
    });
    const output = model.decode({
      sentences: sentences,
      // `?? true` keeps the default on when the flag is omitted while still
      // letting callers pass `false` (`|| true` would always evaluate to true).
      normalize_embeddings: normalize_embeddings ?? true,
    });
    self.postMessage({
      status: "complete",
      message: "complete",
      output: output,
    });
  } catch (e) {
    self.postMessage({ error: e });
  }
});
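// Usage sketch from the main thread (hypothetical URLs and model ID):
//
//   const worker = new Worker("./T5ModelEncoderWorker.js", { type: "module" });
//   worker.postMessage({
//     weightsURL, tokenizerURL, configURL,
//     modelID: "t5-small",
//     sentences: ["Hello world"],
//     normalize_embeddings: true,
//   });
//   worker.onmessage = ({ data }) => {
//     if (data.status === "complete") console.log(data.output);
//   };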
candle/candle-wasm-examples/t5/T5ModelEncoderWorker.js/0
{ "file_path": "candle/candle-wasm-examples/t5/T5ModelEncoderWorker.js", "repo_id": "candle", "token_count": 873 }
48
use candle_wasm_example_whisper::worker::{Decoder as D, ModelData};
use wasm_bindgen::prelude::*;

#[wasm_bindgen]
pub struct Decoder {
    decoder: D,
}

#[wasm_bindgen]
impl Decoder {
    #[wasm_bindgen(constructor)]
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        weights: Vec<u8>,
        tokenizer: Vec<u8>,
        mel_filters: Vec<u8>,
        config: Vec<u8>,
        quantized: bool,
        is_multilingual: bool,
        timestamps: bool,
        task: Option<String>,
        language: Option<String>,
    ) -> Result<Decoder, JsError> {
        let decoder = D::load(ModelData {
            tokenizer,
            mel_filters,
            config,
            quantized,
            weights,
            is_multilingual,
            timestamps,
            task,
            language,
        });
        match decoder {
            Ok(decoder) => Ok(Self { decoder }),
            Err(e) => Err(JsError::new(&e.to_string())),
        }
    }

    #[wasm_bindgen]
    pub fn decode(&mut self, wav_input: Vec<u8>) -> Result<String, JsError> {
        let segments = self
            .decoder
            .convert_and_run(&wav_input)
            .map_err(|e| JsError::new(&e.to_string()))?;
        let json = serde_json::to_string(&segments)?;
        Ok(json)
    }
}

fn main() {}
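// Usage sketch from JavaScript once compiled with wasm-bindgen (hypothetical
// asset names; arguments mirror the Rust signature above, with the byte
// vectors passed as Uint8Array buffers):
//
//     const decoder = new Decoder(weights, tokenizer, melFilters, config,
//                                 /* quantized */ false, /* is_multilingual */ false,
//                                 /* timestamps */ true, "transcribe", "en");
//     const segments = JSON.parse(decoder.decode(wavBytes));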
candle/candle-wasm-examples/whisper/src/bin/m.rs/0
{ "file_path": "candle/candle-wasm-examples/whisper/src/bin/m.rs", "repo_id": "candle", "token_count": 694 }
49
mod app;
pub mod coco_classes;
pub mod model;
pub mod worker;

pub use app::App;
pub use worker::Worker;
candle/candle-wasm-examples/yolo/src/lib.rs/0
{ "file_path": "candle/candle-wasm-examples/yolo/src/lib.rs", "repo_id": "candle", "token_count": 37 }
50
module.exports = {
  root: true,
  parser: "@typescript-eslint/parser",
  extends: [
    "eslint:recommended",
    "plugin:@typescript-eslint/recommended",
    "plugin:svelte/recommended",
    "prettier",
  ],
  plugins: ["@typescript-eslint"],
  ignorePatterns: ["*.cjs"],
  overrides: [
    {
      files: ["*.svelte"],
      parser: "svelte-eslint-parser",
      parserOptions: {
        parser: "@typescript-eslint/parser",
      },
    },
  ],
  parserOptions: {
    sourceType: "module",
    ecmaVersion: 2020,
    extraFileExtensions: [".svelte"],
  },
  rules: {
    "require-yield": "off",
    "@typescript-eslint/no-explicit-any": "error",
    "@typescript-eslint/no-non-null-assertion": "error",
    "@typescript-eslint/no-unused-vars": [
      // prevent variables with a _ prefix from being marked as unused
      "error",
      {
        argsIgnorePattern: "^_",
      },
    ],
    "object-shorthand": ["error", "always"],
  },
  env: {
    browser: true,
    es2017: true,
    node: true,
  },
};
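// Example of the unused-vars rule above: with `argsIgnorePattern: "^_"`,
// `function onClick(_event) {}` lints clean, while an unused `event`
// parameter without the underscore prefix is reported.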
chat-ui/.eslintrc.cjs/0
{ "file_path": "chat-ui/.eslintrc.cjs", "repo_id": "chat-ui", "token_count": 420 }
51
## Privacy

> Last updated: April 15, 2024

Users of HuggingChat are authenticated through their HF user account.

We endorse Privacy by Design. As such, your conversations are private to you and will not be shared with anyone, including model authors, for any purpose, including for research or model training purposes.

Your conversation data will only be stored to let you access past conversations. You can click on the Delete icon to delete any past conversation at any moment.

🗓 Please also consult huggingface.co's main privacy policy at <https://huggingface.co/privacy>. To exercise any of your legal privacy rights, please send an email to <privacy@huggingface.co>.

## About available LLMs

The goal of this app is to showcase that it is now possible to build an open source alternative to ChatGPT. 💪

We aim to always provide a diverse set of state of the art open LLMs, hence we rotate the available models over time. Historically, HuggingChat has been running models such as:

- [Llama 2 70B](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf)
- [CodeLlama 34B](https://about.fb.com/news/2023/08/code-llama-ai-for-coding/)
- [Falcon 180B](https://www.tii.ae/news/technology-innovation-institute-introduces-worlds-most-powerful-open-llm-falcon-180b)
- [Mistral 7B](https://mistral.ai/news/announcing-mistral-7b/)
- [Cohere Command R+](https://huggingface.co/chat/models/CohereForAI/c4ai-command-r-plus)
- [Google Gemma 7B](https://huggingface.co/chat/models/google/gemma-1.1-7b-it)

This is only a partial list. Check the [models](https://huggingface.co/chat/models/) page for an up-to-date list of the best available LLMs.

## Technical details

This app is running in a [Space](https://huggingface.co/docs/hub/spaces-overview), which entails that the code for this UI is publicly visible [inside the Space repo](https://huggingface.co/spaces/huggingchat/chat-ui/tree/main).

**Further development takes place on the [huggingface/chat-ui GitHub repo](https://github.com/huggingface/chat-ui).**

The inference backend is running the optimized [text-generation-inference](https://github.com/huggingface/text-generation-inference) on HuggingFace's Inference API infrastructure.

It is therefore possible to deploy a copy of this app to a Space and customize it (swap the model, add some UI elements, or store user messages according to your own Terms and conditions). You can also 1-click deploy your own instance using the [Chat UI Spaces Docker template](https://huggingface.co/new-space?template=huggingchat/chat-ui-template).

We welcome any feedback on this app: please participate in the public discussion at <https://huggingface.co/spaces/huggingchat/chat-ui/discussions>

<a target="_blank" href="https://huggingface.co/spaces/huggingchat/chat-ui/discussions"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-a-discussion-xl.svg" title="open a discussion"></a>
chat-ui/PRIVACY.md/0
{ "file_path": "chat-ui/PRIVACY.md", "repo_id": "chat-ui", "token_count": 871 }
52
# Common Issues

## 403: You don't have access to this conversation

Most likely you are running chat-ui over HTTP. The recommended option is to set up something like NGINX to handle HTTPS and proxy the requests to chat-ui. If you really need to run over HTTP, you can add `ALLOW_INSECURE_COOKIES=true` to your `.env.local`.

Make sure to set your `PUBLIC_ORIGIN` in your `.env.local` to the correct URL as well.
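For reference, a minimal NGINX server block for this setup could look like the following (hypothetical domain and port — adjust both, and add your TLS certificate paths):

```
server {
    listen 443 ssl;
    server_name chat.example.com;

    location / {
        proxy_pass http://127.0.0.1:3000;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}
```

with the matching entry in `.env.local`:

```ini
PUBLIC_ORIGIN=https://chat.example.com
```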
chat-ui/docs/source/configuration/common-issues.md/0
{ "file_path": "chat-ui/docs/source/configuration/common-issues.md", "repo_id": "chat-ui", "token_count": 118 }
53