Dataset preview columns:
  text               string (lengths 7 to 1.24M characters)
  id                 string (lengths 14 to 166 characters)
  metadata           dict
  __index_level_0__  int64 (values 0 to 519)
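The rows below follow this schema: `text` holds the full source of a file, `id` encodes its path, and `metadata` carries `file_path`, `repo_id`, and `token_count`. As a minimal sketch (not part of the original export), the snippet below shows how such a dataset could be loaded and each row's `text` written back out to its recorded file path; the dataset identifier "your-org/transformers-tests" is a hypothetical placeholder, while the column names and metadata keys are taken from the rows shown here.

# Minimal sketch, assuming a Hub dataset with the columns previewed above.
# "your-org/transformers-tests" is a placeholder identifier, not the real repo id.
from pathlib import Path

from datasets import load_dataset

ds = load_dataset("your-org/transformers-tests", split="train")

for row in ds:
    meta = row["metadata"]  # e.g. {"file_path": ..., "repo_id": ..., "token_count": ...}
    out_path = Path("restored") / meta["file_path"]
    out_path.parent.mkdir(parents=True, exist_ok=True)
    # Write the row's source text back to its original relative path.
    out_path.write_text(row["text"], encoding="utf-8")
    print(row["id"], meta["token_count"])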
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class TFMBartModelTester: config_cls = MBartConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size) eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1) input_ids = tf.concat([input_ids, eos_tensor], axis=1) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFMBartModel(config=config).get_decoder() input_ids = inputs_dict["input_ids"] input_ids = input_ids[:1, :] attention_mask = 
inputs_dict["attention_mask"][:1, :] head_mask = inputs_dict["head_mask"] self.batch_size = 1 # first forward pass outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() past_key_values = past_key_values[1] def prepare_mbart_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8) if decoder_attention_mask is None: decoder_attention_mask = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8), ], axis=-1, ) if head_mask is None: head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else () pipeline_model_mapping = ( { "feature-extraction": TFMBartModel, "summarization": TFMBartForConditionalGeneration, "text2text-generation": TFMBartForConditionalGeneration, "translation": TFMBartForConditionalGeneration, } if is_tf_available() else {} ) is_encoder_decoder = True test_pruning = False test_onnx = False # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def setUp(self): self.model_tester = TFMBartModelTester(self) self.config_tester = ConfigTester(self, config_class=MBartConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs) @require_sentencepiece @require_tokenizers @require_tf class TFMBartModelIntegrationTest(unittest.TestCase): src_text = [ " UN Chief Says There Is No Military Solution in Syria", ] expected_text = [ "Şeful ONU declară că nu există o soluţie militară în Siria", ] model_name = "facebook/mbart-large-en-ro" @cached_property def tokenizer(self): return AutoTokenizer.from_pretrained(self.model_name) @cached_property def model(self): model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name) return model def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs): generated_words = self.translate_src_text(**tokenizer_kwargs) self.assertListEqual(self.expected_text, generated_words) def translate_src_text(self, **tokenizer_kwargs): model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf") generated_ids = self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2 ) generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True) return generated_words @slow def test_batch_generation_en_ro(self): self._assert_generated_batch_equal_expected()
transformers/tests/models/mbart/test_modeling_tf_mbart.py/0
{ "file_path": "transformers/tests/models/mbart/test_modeling_tf_mbart.py", "repo_id": "transformers", "token_count": 3735 }
443
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch MobileViT model.""" import unittest from transformers import MobileViTConfig from transformers.testing_utils import is_flaky, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class MobileViTConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "neck_hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_attention_heads")) class MobileViTModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=32, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.last_hidden_size = last_hidden_size self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.conv_kernel_size = conv_kernel_size self.output_stride = output_stride self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.classifier_dropout_prob = classifier_dropout_prob self.use_labels = use_labels self.is_training = is_training self.num_labels = num_labels self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels, pixel_labels def get_config(self): return MobileViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, 
classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, hidden_sizes=[12, 16, 20], neck_hidden_sizes=[8, 8, 16, 16, 32, 32, 32], ) def create_and_check_model(self, config, pixel_values, labels, pixel_labels): model = MobileViTModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = MobileViTForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = MobileViTForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) result = model(pixel_values, labels=pixel_labels) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels, pixel_labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as MobileViT does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) pipeline_model_mapping = ( { "image-feature-extraction": MobileViTModel, "image-classification": MobileViTForImageClassification, "image-segmentation": MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = MobileViTModelTester(self) self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="MobileViT does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="MobileViT does not support input and output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="MobileViT does not output attentions") def test_attention_outputs(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_stages = 5 self.assertEqual(len(hidden_states), expected_num_stages) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. divisor = 2 for i in range(len(hidden_states)): self.assertListEqual( list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], ) divisor *= 2 self.assertEqual(self.model_tester.output_stride, divisor // 2) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "apple/mobilevit-small" model = MobileViTModel.from_pretrained(model_name) self.assertIsNotNone(model) @is_flaky(description="is_flaky https://github.com/huggingface/transformers/issues/29516") def test_batching_equivalence(self): super().test_batching_equivalence() # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class MobileViTModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = 
MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow def test_inference_semantic_segmentation(self): model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small") model = model.to(torch_device) image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small") image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 21, 32, 32)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor( [ [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]], [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]], [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]], ], device=torch_device, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4)) @slow def test_post_processing_semantic_segmentation(self): model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small") model = model.to(torch_device) image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small") image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) outputs.logits = outputs.logits.detach().cpu() segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)]) expected_shape = torch.Size((50, 60)) self.assertEqual(segmentation[0].shape, expected_shape) segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs) expected_shape = torch.Size((32, 32)) self.assertEqual(segmentation[0].shape, expected_shape)
transformers/tests/models/mobilevit/test_modeling_mobilevit.py/0
{ "file_path": "transformers/tests/models/mobilevit/test_modeling_mobilevit.py", "repo_id": "transformers", "token_count": 6050 }
444
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import timeout_decorator # noqa from transformers import OPTConfig, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform" import jax import jax.numpy as jnp from transformers import FlaxOPTForCausalLM, FlaxOPTModel, GPT2Tokenizer def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None): if attention_mask is None: attention_mask = np.where(input_ids != config.pad_token_id, 1, 0) return { "input_ids": input_ids, "attention_mask": attention_mask, } @require_flax class FlaxOPTModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16, initializer_range=0.02, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.embed_dim = embed_dim self.word_embed_proj_dim = word_embed_proj_dim self.initializer_range = initializer_range self.is_encoder_decoder = False def prepare_config_and_inputs(self): input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size) input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1) config = OPTConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, 
embed_dim=self.embed_dim, is_encoder_decoder=False, word_embed_proj_dim=self.word_embed_proj_dim, initializer_range=self.initializer_range, use_cache=False, ) inputs_dict = prepare_opt_inputs_dict(config, input_ids) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def check_use_cache_forward(self, model_class_name, config, inputs_dict): max_length = 20 model = model_class_name(config) input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] past_key_values = model.init_cache(input_ids.shape[0], max_length) attention_mask = jnp.ones((input_ids.shape[0], max_length), dtype="i4") position_ids = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1), ) outputs_cache = model( input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids, ) position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model( input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids, ) outputs = model(input_ids) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict): max_length = 20 model = model_class_name(config) input_ids, attention_mask = ( inputs_dict["input_ids"], inputs_dict["attention_mask"], ) attention_mask_cache = jnp.concatenate( [ attention_mask, jnp.zeros((attention_mask.shape[0], max_length - attention_mask.shape[1])), ], axis=-1, ) past_key_values = model.init_cache(input_ids.shape[0], max_length) position_ids = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1), ) outputs_cache = model( input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids, ) position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model( input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids, ) outputs = model(input_ids, attention_mask=attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") @require_flax class FlaxOPTModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin): all_model_classes = (FlaxOPTModel, FlaxOPTForCausalLM) if is_flax_available() else () all_generative_model_classes = () if is_flax_available() else () def setUp(self): self.model_tester = FlaxOPTModelTester(self) def test_use_cache_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(model_class, config, inputs_dict) def test_use_cache_forward_with_attn_mask(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("facebook/opt-125m") input_ids = np.ones((1, 1)) * 
model.config.eos_token_id outputs = model(input_ids) self.assertIsNotNone(outputs) @require_sentencepiece @require_flax class FlaxOPTModelIntegrationTests(unittest.TestCase): @slow def test_inference_no_head(self): model = FlaxOPTModel.from_pretrained("facebook/opt-350m") input_ids = jnp.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) output = model(input_ids=input_ids).last_hidden_state expected_shape = (1, 11, 512) self.assertEqual(output.shape, expected_shape) expected_slice = jnp.array( [[-0.2867, -1.9256, -0.3062], [-1.2711, -0.1337, -0.1897], [0.4109, 0.1187, -1.3142]] ) self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=4e-2)) @require_flax @slow class FlaxOPTEmbeddingsTest(unittest.TestCase): def setUp(self): super().setUp() self.path_model = "facebook/opt-350m" def test_logits(self): model = FlaxOPTForCausalLM.from_pretrained(self.path_model) tokenizer = GPT2Tokenizer.from_pretrained(self.path_model) prompts = [ "Today is a beautiful day and I want to", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False inputs = tokenizer(prompts, return_tensors="jax", padding=True, add_special_tokens=False) logits = model(inputs.input_ids, attention_mask=inputs.attention_mask)[0].mean(axis=-1) logits_meta = jnp.array( [ [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670], [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822], [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703], [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477], ] ) self.assertTrue(jnp.allclose(logits, logits_meta, atol=4e-2)) model = jax.jit(model) logits = model(inputs.input_ids, attention_mask=inputs.attention_mask)[0].mean(axis=-1) self.assertTrue(jnp.allclose(logits, logits_meta, atol=4e-2)) @require_flax @slow class FlaxOPTGenerationTest(unittest.TestCase): @property def prompts(self): return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def test_generation_pre_attn_layer_norm(self): model_id = "facebook/opt-125m" EXPECTED_OUTPUTS = [ "Today is a beautiful day and I want to", "In the city of New York, the city", "Paris is the capital of France and the capital", "Computers and mobile phones have taken over the", ] predicted_outputs = [] model = FlaxOPTForCausalLM.from_pretrained(model_id) tokenizer = GPT2Tokenizer.from_pretrained(model_id) for prompt in self.prompts: input_ids = tokenizer(prompt, return_tensors="jax").input_ids generated_ids = model.generate(input_ids, max_length=10) generated_ids = generated_ids[0] generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) predicted_outputs += generated_string self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS) def test_generation_post_attn_layer_norm(self): model_id = "facebook/opt-350m" EXPECTED_OUTPUTS = [ "Today is a beautiful day and I want to", "In the city of San Francisco, the city", "Paris is the capital of France and the capital", "Computers and mobile phones have taken over the", ] predicted_outputs = [] model = FlaxOPTForCausalLM.from_pretrained(model_id) tokenizer = GPT2Tokenizer.from_pretrained(model_id) for prompt in self.prompts: input_ids = tokenizer(prompt, return_tensors="jax").input_ids generated_ids = model.generate(input_ids, 
max_length=10) generated_ids = generated_ids[0] generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) predicted_outputs += generated_string self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS) def test_jitted_batch_generation(self): model_id = "facebook/opt-125m" EXPECTED_OUTPUTS = [ "Today is a beautiful day and I want to thank", "In the city of Rome Canaver Canaver Canaver Canaver", ] model = FlaxOPTForCausalLM.from_pretrained(model_id) tokenizer = GPT2Tokenizer.from_pretrained(model_id) inputs = tokenizer( [ "Today is a beautiful day and I want to", "In the city of", ], return_tensors="jax", padding=True, ) jit_generate = jax.jit(model.generate) output_sequences = jit_generate(inputs["input_ids"], attention_mask=inputs["attention_mask"]).sequences output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True) self.assertIsNotNone(output_string, EXPECTED_OUTPUTS) def test_batch_generation(self): model_id = "facebook/opt-350m" tokenizer = GPT2Tokenizer.from_pretrained(model_id) model = FlaxOPTForCausalLM.from_pretrained(model_id) tokenizer.padding_side = "left" # use different length sentences to test batching sentences = [ "Hello, my dog is a little", "Today, I", ] inputs = tokenizer(sentences, return_tensors="jax", padding=True) input_ids = inputs["input_ids"] outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], trace=False) inputs_non_padded = tokenizer(sentences[0], return_tensors="jax").input_ids output_non_padded = model.generate(input_ids=inputs_non_padded) num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].sum() inputs_padded = tokenizer(sentences[1], return_tensors="jax").input_ids output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings) batch_out_sentence = tokenizer.batch_decode(outputs[0], skip_special_tokens=True) non_padded_sentence = tokenizer.decode(output_non_padded[0][0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0][0], skip_special_tokens=True) expected_output_sentence = [ "Hello, my dog is a little bit of a dork.\nI'm a little bit", "Today, I was in the middle of a conversation with a friend about the", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])
transformers/tests/models/opt/test_modeling_flax_opt.py/0
{ "file_path": "transformers/tests/models/opt/test_modeling_flax_opt.py", "repo_id": "transformers", "token_count": 7181 }
445
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Please note that Pop2PianoTokenizer is too far from our usual tokenizers and thus cannot use the TokenizerTesterMixin class. """ import os import pickle import shutil import tempfile import unittest from transformers.feature_extraction_utils import BatchFeature from transformers.testing_utils import ( is_pretty_midi_available, is_torch_available, require_pretty_midi, require_torch, ) from transformers.tokenization_utils import BatchEncoding if is_torch_available(): import torch requirements_available = is_torch_available() and is_pretty_midi_available() if requirements_available: import pretty_midi from transformers import Pop2PianoTokenizer @require_torch @require_pretty_midi class Pop2PianoTokenizerTest(unittest.TestCase): def setUp(self): super().setUp() self.tokenizer = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano") def get_input_notes(self): notes = [ [ pretty_midi.Note(start=0.441179, end=2.159456, pitch=70, velocity=77), pretty_midi.Note(start=0.673379, end=0.905578, pitch=73, velocity=77), pretty_midi.Note(start=0.905578, end=2.159456, pitch=73, velocity=77), pretty_midi.Note(start=1.114558, end=2.159456, pitch=78, velocity=77), pretty_midi.Note(start=1.323537, end=1.532517, pitch=80, velocity=77), ], [ pretty_midi.Note(start=0.441179, end=2.159456, pitch=70, velocity=77), ], ] return notes def test_call(self): notes = self.get_input_notes() output = self.tokenizer( notes, return_tensors="pt", padding="max_length", truncation=True, max_length=10, return_attention_mask=True, ) # check the output type self.assertTrue(isinstance(output, BatchEncoding)) # check the values expected_output_token_ids = torch.tensor( [[134, 133, 74, 135, 77, 132, 77, 133, 77, 82], [134, 133, 74, 136, 132, 74, 134, 134, 134, 134]] ) expected_output_attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0]]) self.assertTrue(torch.allclose(output["token_ids"], expected_output_token_ids, atol=1e-4)) self.assertTrue(torch.allclose(output["attention_mask"], expected_output_attention_mask, atol=1e-4)) def test_batch_decode(self): # test batch decode with model, feature-extractor outputs(beatsteps, extrapolated_beatstep) # Please note that this test does not test the accuracy of the outputs, instead it is designed to make sure that # the tokenizer's batch_decode can deal with attention_mask in feature-extractor outputs. For the accuracy check # please see the `test_batch_decode_outputs` test. 
model_output = torch.concatenate( [ torch.randint(size=[120, 96], low=0, high=70, dtype=torch.long), torch.zeros(size=[1, 96], dtype=torch.long), torch.randint(size=[50, 96], low=0, high=40, dtype=torch.long), torch.zeros(size=[1, 96], dtype=torch.long), ], axis=0, ) input_features = BatchFeature( { "beatsteps": torch.ones([2, 955]), "extrapolated_beatstep": torch.ones([2, 1000]), "attention_mask": torch.concatenate( [ torch.ones([120, 96], dtype=torch.long), torch.zeros([1, 96], dtype=torch.long), torch.ones([50, 96], dtype=torch.long), torch.zeros([1, 96], dtype=torch.long), ], axis=0, ), "attention_mask_beatsteps": torch.ones([2, 955]), "attention_mask_extrapolated_beatstep": torch.ones([2, 1000]), } ) output = self.tokenizer.batch_decode(token_ids=model_output, feature_extractor_output=input_features)[ "pretty_midi_objects" ] # check length self.assertTrue(len(output) == 2) # check object type self.assertTrue(isinstance(output[0], pretty_midi.pretty_midi.PrettyMIDI)) self.assertTrue(isinstance(output[1], pretty_midi.pretty_midi.PrettyMIDI)) def test_batch_decode_outputs(self): # test batch decode with model, feature-extractor outputs(beatsteps, extrapolated_beatstep) # Please note that this test tests the accuracy of the outputs of the tokenizer's `batch_decode` method. model_output = torch.tensor( [ [134, 133, 74, 135, 77, 82, 84, 136, 132, 74, 77, 82, 84], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], ] ) input_features = BatchEncoding( { "beatsteps": torch.tensor([[0.0697, 0.1103, 0.1509, 0.1916]]), "extrapolated_beatstep": torch.tensor([[0.0000, 0.0406, 0.0813, 0.1219]]), } ) output = self.tokenizer.batch_decode(token_ids=model_output, feature_extractor_output=input_features) # check outputs self.assertEqual(len(output["notes"]), 4) predicted_start_timings, predicted_end_timings = [], [] for i in output["notes"]: predicted_start_timings.append(i.start) predicted_end_timings.append(i.end) # Checking note start timings expected_start_timings = torch.tensor( [ 0.069700, 0.110300, 0.110300, 0.110300, ] ) predicted_start_timings = torch.tensor(predicted_start_timings) self.assertTrue(torch.allclose(expected_start_timings, predicted_start_timings, atol=1e-4)) # Checking note end timings expected_end_timings = torch.tensor( [ 0.191600, 0.191600, 0.191600, 0.191600, ] ) predicted_end_timings = torch.tensor(predicted_end_timings) self.assertTrue(torch.allclose(expected_end_timings, predicted_end_timings, atol=1e-4)) def test_get_vocab(self): vocab_dict = self.tokenizer.get_vocab() self.assertIsInstance(vocab_dict, dict) self.assertGreaterEqual(len(self.tokenizer), len(vocab_dict)) vocab = [self.tokenizer.convert_ids_to_tokens(i) for i in range(len(self.tokenizer))] self.assertEqual(len(vocab), len(self.tokenizer)) self.tokenizer.add_tokens(["asdfasdfasdfasdf"]) vocab = [self.tokenizer.convert_ids_to_tokens(i) for i in range(len(self.tokenizer))] self.assertEqual(len(vocab), len(self.tokenizer)) def test_save_and_load_tokenizer(self): tmpdirname = tempfile.mkdtemp() sample_notes = self.get_input_notes() self.tokenizer.add_tokens(["bim", "bambam"]) additional_special_tokens = self.tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token") self.tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens}) before_token_ids = self.tokenizer(sample_notes)["token_ids"] before_vocab = self.tokenizer.get_vocab() self.tokenizer.save_pretrained(tmpdirname) after_tokenizer = 
self.tokenizer.__class__.from_pretrained(tmpdirname) after_token_ids = after_tokenizer(sample_notes)["token_ids"] after_vocab = after_tokenizer.get_vocab() self.assertDictEqual(before_vocab, after_vocab) self.assertListEqual(before_token_ids, after_token_ids) self.assertIn("bim", after_vocab) self.assertIn("bambam", after_vocab) self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens) shutil.rmtree(tmpdirname) def test_pickle_tokenizer(self): tmpdirname = tempfile.mkdtemp() notes = self.get_input_notes() subwords = self.tokenizer(notes)["token_ids"] filename = os.path.join(tmpdirname, "tokenizer.bin") with open(filename, "wb") as handle: pickle.dump(self.tokenizer, handle) with open(filename, "rb") as handle: tokenizer_new = pickle.load(handle) subwords_loaded = tokenizer_new(notes)["token_ids"] self.assertListEqual(subwords, subwords_loaded) def test_padding_side_in_kwargs(self): tokenizer_p = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano", padding_side="left") self.assertEqual(tokenizer_p.padding_side, "left") tokenizer_p = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano", padding_side="right") self.assertEqual(tokenizer_p.padding_side, "right") self.assertRaises( ValueError, Pop2PianoTokenizer.from_pretrained, "sweetcocoa/pop2piano", padding_side="unauthorized", ) def test_truncation_side_in_kwargs(self): tokenizer_p = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano", truncation_side="left") self.assertEqual(tokenizer_p.truncation_side, "left") tokenizer_p = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano", truncation_side="right") self.assertEqual(tokenizer_p.truncation_side, "right") self.assertRaises( ValueError, Pop2PianoTokenizer.from_pretrained, "sweetcocoa/pop2piano", truncation_side="unauthorized", ) def test_right_and_left_padding(self): tokenizer = self.tokenizer notes = self.get_input_notes() notes = notes[0] max_length = 20 padding_idx = tokenizer.pad_token_id # RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True tokenizer.padding_side = "right" padded_notes = tokenizer(notes, padding="max_length", max_length=max_length)["token_ids"] padded_notes_length = len(padded_notes) notes_without_padding = tokenizer(notes, padding="do_not_pad")["token_ids"] padding_size = max_length - len(notes_without_padding) self.assertEqual(padded_notes_length, max_length) self.assertEqual(notes_without_padding + [padding_idx] * padding_size, padded_notes) # LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True tokenizer.padding_side = "left" padded_notes = tokenizer(notes, padding="max_length", max_length=max_length)["token_ids"] padded_notes_length = len(padded_notes) notes_without_padding = tokenizer(notes, padding="do_not_pad")["token_ids"] padding_size = max_length - len(notes_without_padding) self.assertEqual(padded_notes_length, max_length) self.assertEqual([padding_idx] * padding_size + notes_without_padding, padded_notes) # RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_padding' notes_without_padding = tokenizer(notes)["token_ids"] tokenizer.padding_side = "right" padded_notes_right = tokenizer(notes, padding=False)["token_ids"] self.assertEqual(len(padded_notes_right), len(notes_without_padding)) self.assertEqual(padded_notes_right, notes_without_padding) tokenizer.padding_side = "left" padded_notes_left = tokenizer(notes, padding="longest")["token_ids"] 
self.assertEqual(len(padded_notes_left), len(notes_without_padding)) self.assertEqual(padded_notes_left, notes_without_padding) tokenizer.padding_side = "right" padded_notes_right = tokenizer(notes, padding="longest")["token_ids"] self.assertEqual(len(padded_notes_right), len(notes_without_padding)) self.assertEqual(padded_notes_right, notes_without_padding) tokenizer.padding_side = "left" padded_notes_left = tokenizer(notes, padding=False)["token_ids"] self.assertEqual(len(padded_notes_left), len(notes_without_padding)) self.assertEqual(padded_notes_left, notes_without_padding) def test_right_and_left_truncation(self): tokenizer = self.tokenizer notes = self.get_input_notes() notes = notes[0] truncation_size = 3 # RIGHT TRUNCATION - Check that it correctly truncates when a maximum length is specified along with the truncation flag set to True tokenizer.truncation_side = "right" full_encoded_notes = tokenizer(notes)["token_ids"] full_encoded_notes_length = len(full_encoded_notes) truncated_notes = tokenizer(notes, max_length=full_encoded_notes_length - truncation_size, truncation=True)[ "token_ids" ] self.assertEqual(full_encoded_notes_length, len(truncated_notes) + truncation_size) self.assertEqual(full_encoded_notes[:-truncation_size], truncated_notes) # LEFT TRUNCATION - Check that it correctly truncates when a maximum length is specified along with the truncation flag set to True tokenizer.truncation_side = "left" full_encoded_notes = tokenizer(notes)["token_ids"] full_encoded_notes_length = len(full_encoded_notes) truncated_notes = tokenizer(notes, max_length=full_encoded_notes_length - truncation_size, truncation=True)[ "token_ids" ] self.assertEqual(full_encoded_notes_length, len(truncated_notes) + truncation_size) self.assertEqual(full_encoded_notes[truncation_size:], truncated_notes) # RIGHT & LEFT TRUNCATION - Check that nothing is done for 'longest' and 'no_truncation' tokenizer.truncation_side = "right" truncated_notes_right = tokenizer(notes, truncation=True)["token_ids"] self.assertEqual(full_encoded_notes_length, len(truncated_notes_right)) self.assertEqual(full_encoded_notes, truncated_notes_right) tokenizer.truncation_side = "left" truncated_notes_left = tokenizer(notes, truncation="longest_first")["token_ids"] self.assertEqual(len(truncated_notes_left), full_encoded_notes_length) self.assertEqual(truncated_notes_left, full_encoded_notes) tokenizer.truncation_side = "right" truncated_notes_right = tokenizer(notes, truncation="longest_first")["token_ids"] self.assertEqual(len(truncated_notes_right), full_encoded_notes_length) self.assertEqual(truncated_notes_right, full_encoded_notes) tokenizer.truncation_side = "left" truncated_notes_left = tokenizer(notes, truncation=True)["token_ids"] self.assertEqual(len(truncated_notes_left), full_encoded_notes_length) self.assertEqual(truncated_notes_left, full_encoded_notes) def test_padding_to_multiple_of(self): notes = self.get_input_notes() if self.tokenizer.pad_token is None: self.skipTest(reason="No padding token.") else: normal_tokens = self.tokenizer(notes[0], padding=True, pad_to_multiple_of=8) for key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") normal_tokens = self.tokenizer(notes[0], pad_to_multiple_of=8) for key, value in normal_tokens.items(): self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # Should also work with truncation normal_tokens = self.tokenizer(notes[0], padding=True, truncation=True, pad_to_multiple_of=8) 
for key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # truncation to something which is not a multiple of pad_to_multiple_of raises an error self.assertRaises( ValueError, self.tokenizer.__call__, notes[0], padding=True, truncation=True, max_length=12, pad_to_multiple_of=8, ) def test_padding_with_attention_mask(self): if self.tokenizer.pad_token is None: self.skipTest(reason="No padding token.") if "attention_mask" not in self.tokenizer.model_input_names: self.skipTest(reason="This model does not use attention mask.") features = [ {"token_ids": [1, 2, 3, 4, 5, 6], "attention_mask": [1, 1, 1, 1, 1, 0]}, {"token_ids": [1, 2, 3], "attention_mask": [1, 1, 0]}, ] padded_features = self.tokenizer.pad(features) if self.tokenizer.padding_side == "right": self.assertListEqual(padded_features["attention_mask"], [[1, 1, 1, 1, 1, 0], [1, 1, 0, 0, 0, 0]]) else: self.assertListEqual(padded_features["attention_mask"], [[1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 0]])
transformers/tests/models/pop2piano/test_tokenization_pop2piano.py/0
{ "file_path": "transformers/tests/models/pop2piano/test_tokenization_pop2piano.py", "repo_id": "transformers", "token_count": 7911 }
446
# coding=utf-8 # Copyright 2024 The Qwen team, Alibaba Group and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Qwen2MoE model.""" import gc import tempfile import unittest import pytest from transformers import AutoTokenizer, Qwen2MoeConfig, is_torch_available, set_seed from transformers.testing_utils import ( backend_empty_cache, require_bitsandbytes, require_flash_attn, require_torch, require_torch_gpu, require_torch_sdpa, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( Qwen2MoeForCausalLM, Qwen2MoeForSequenceClassification, Qwen2MoeForTokenClassification, Qwen2MoeModel, ) class Qwen2MoeModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, max_window_layers=3, use_sliding_window=True, sliding_window=2, num_attention_heads=4, num_key_value_heads=2, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, expert_interval=1, moe_intermediate_size=12, shared_expert_intermediate_size=36, shared_expert_gate=True, num_experts_per_tok=2, num_experts=8, norm_topk_prob=False, output_router_logits=False, router_aux_loss_coef=0.001, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, bos_token_id=1, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.max_window_layers = max_window_layers self.use_sliding_window = use_sliding_window self.sliding_window = sliding_window self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.scope = scope self.expert_interval = expert_interval self.moe_intermediate_size = moe_intermediate_size self.shared_expert_intermediate_size = shared_expert_intermediate_size self.shared_expert_gate = shared_expert_gate self.num_experts_per_tok 
= num_experts_per_tok self.num_experts = num_experts self.norm_topk_prob = norm_topk_prob self.output_router_logits = output_router_logits self.router_aux_loss_coef = router_aux_loss_coef # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.prepare_config_and_inputs def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones(self.batch_size, self.seq_length)).to(torch_device) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return Qwen2MoeConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, max_window_layers=self.max_window_layers, use_sliding_window=self.use_sliding_window, sliding_window=self.sliding_window, num_attention_heads=self.num_attention_heads, num_key_value_heads=self.num_key_value_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, expert_interval=self.expert_interval, moe_intermediate_size=self.moe_intermediate_size, shared_expert_intermediate_size=self.shared_expert_intermediate_size, shared_expert_gate=self.shared_expert_gate, num_experts_per_tok=self.num_experts_per_tok, num_experts=self.num_experts, norm_topk_prob=self.norm_topk_prob, output_router_logits=self.output_router_logits, router_aux_loss_coef=self.router_aux_loss_coef, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, bos_token_id=self.bos_token_id, ) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_model with Llama->Qwen2Moe def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = Qwen2MoeModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_model_as_decoder with Llama->Qwen2Moe def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = Qwen2MoeModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 
self.seq_length, self.hidden_size)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_for_causal_lm with Llama->Qwen2Moe def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = Qwen2MoeForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_decoder_model_past_large_inputs with Llama->Qwen2Moe def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = Qwen2MoeForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.prepare_config_and_inputs_for_common def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch # Copied from tests.models.mistral.test_modeling_mistral.MistralModelTest with Mistral->Qwen2Moe class Qwen2MoeModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (Qwen2MoeModel, Qwen2MoeForCausalLM, Qwen2MoeForSequenceClassification, Qwen2MoeForTokenClassification) if is_torch_available() else () ) all_generative_model_classes = (Qwen2MoeForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": Qwen2MoeModel, 
"text-classification": Qwen2MoeForSequenceClassification, "token-classification": Qwen2MoeForTokenClassification, "text-generation": Qwen2MoeForCausalLM, "zero-shot": Qwen2MoeForSequenceClassification, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False fx_compatible = True # TODO (ydshieh): Check this. See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146 def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True # Ignore copy @require_torch_sdpa @slow def test_eager_matches_sdpa_generate(self): super().test_eager_matches_sdpa_generate() def setUp(self): self.model_tester = Qwen2MoeModelTester(self) self.config_tester = ConfigTester(self, config_class=Qwen2MoeConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_Qwen2Moe_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() print(config) config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = Qwen2MoeForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_Qwen2Moe_sequence_classification_model_for_single_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "single_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = Qwen2MoeForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_Qwen2Moe_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = Qwen2MoeForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_token_classification_model with Llama->Qwen2Moe,llama->Qwen2Moe 
def test_Qwen2Moe_token_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) token_labels = ids_tensor([self.model_tester.batch_size, self.model_tester.seq_length], config.num_labels) model = Qwen2MoeForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=token_labels) self.assertEqual( result.logits.shape, (self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_labels), ) @unittest.skip(reason="Qwen2Moe buffers include complex numbers, which breaks this test") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="Qwen2Moe uses GQA on all models so the KV cache is a non standard format") def test_past_key_values_format(self): pass @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_generate_padding_right(self): import torch for model_class in self.all_generative_model_classes: config, _ = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16, low_cpu_mem_usage=True).to( torch_device ) dummy_input = torch.LongTensor([[0, 2, 3, 4], [0, 2, 3, 4]]).to(torch_device) dummy_attention_mask = torch.LongTensor([[1, 1, 1, 1], [1, 1, 1, 0]]).to(torch_device) model.generate(dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2", low_cpu_mem_usage=True, ).to(torch_device) with self.assertRaises(ValueError): _ = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False ) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_generate_use_cache(self): import torch max_new_tokens = 30 for model_class in self.all_generative_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() dummy_input = inputs_dict[model_class.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input = dummy_input.to(torch.float16) # make sure that all models have enough positions for generation if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1 model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) # NOTE: Qwen2Moe apparently does not support right padding + use_cache with FA2. 
dummy_attention_mask[:, -1] = 1 model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2", low_cpu_mem_usage=True, ).to(torch_device) # Just test that a large cache works as expected _ = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False, use_cache=True, ) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_equivalence_right_padding(self): self.skipTest(reason="Qwen2Moe flash attention does not support right padding") # Ignore copy def test_load_balancing_loss(self): r""" Let's make sure we can actually compute the loss and do a backward on it. """ config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.num_experts = 8 config.expert_interval = 2 config.output_router_logits = True input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = Qwen2MoeForCausalLM(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask) self.assertEqual(result.router_logits[0].shape, (91, config.num_experts)) torch.testing.assert_close(result.aux_loss.cpu(), torch.tensor(2, dtype=torch.float32), rtol=1e-2, atol=1e-2) # First, we make sure that adding padding tokens doesn't change the loss # loss(input_ids, attention_mask=None) == loss(input_ids + padding, attention_mask=attention_mask_with_padding) pad_length = 1000 # Add padding tokens (assume that pad_token_id=1) to input_ids padding_block = torch.ones(input_ids.shape[0], pad_length, dtype=torch.int32).to(torch_device) padded_input_ids = torch.cat((padding_block, input_ids), dim=1) # this is to simulate padding to the left padded_attention_mask = padded_input_ids.ne(1).to(torch_device) padded_result = model(padded_input_ids, attention_mask=padded_attention_mask) torch.testing.assert_close(result.aux_loss.cpu(), padded_result.aux_loss.cpu(), rtol=1e-4, atol=1e-4) # We make sure that the loss of includding padding tokens != the loss without padding tokens # if attention_mask=None --> we don't exclude padding tokens include_padding_result = model(padded_input_ids, attention_mask=None) # This is to mimic torch.testing.assert_not_close self.assertNotAlmostEqual(include_padding_result.aux_loss.item(), result.aux_loss.item()) @require_torch class Qwen2MoeIntegrationTest(unittest.TestCase): @slow def test_model_a2_7b_logits(self): input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338] model = Qwen2MoeForCausalLM.from_pretrained("Qwen/Qwen1.5-MoE-A2.7B", device_map="auto") input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device) with torch.no_grad(): out = model(input_ids).logits.cpu() # Expected mean on dim = -1 EXPECTED_MEAN = torch.tensor([[-4.2125, -3.6416, -4.9136, -4.3005, -4.9938, -3.4393, -3.5195, -4.1621]]) torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2) # slicing logits[0, 0, 0:30] EXPECTED_SLICE = torch.tensor([2.3013, -0.6595, -0.1389, -1.4095, -1.7381, -1.7609, -2.0449, -2.4289, -3.0271, -2.1351, -0.6568, -4.6012, -1.9102, -0.7475, -3.1377, 4.6904, 7.1936, 7.0991, 6.4414, 6.1720, 6.2617, 5.8751, 5.6997, 5.6011, 5.5828, -3.9505, -0.5384, -0.3392, 1.2445, 2.0714]) # fmt: skip print(out[0, 0, :30]) torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-4, rtol=1e-4) del model backend_empty_cache(torch_device) gc.collect() @slow def test_model_a2_7b_generation(self): EXPECTED_TEXT_COMPLETION = """To 
be or not to be, that is the question. This is the question that has been asked by many people over the""" prompt = "To be or not to" tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-MoE-A2.7B", use_fast=False) model = Qwen2MoeForCausalLM.from_pretrained("Qwen/Qwen1.5-MoE-A2.7B", device_map="auto") input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device) # greedy generation outputs generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) del model backend_empty_cache(torch_device) gc.collect() @require_bitsandbytes @slow @require_flash_attn @pytest.mark.flash_attn_test def test_model_a2_7b_long_prompt(self): EXPECTED_OUTPUT_TOKEN_IDS = [306, 338] # An input with 4097 tokens that is above the size of the sliding window input_ids = [1] + [306, 338] * 2048 model = Qwen2MoeForCausalLM.from_pretrained( "Qwen/Qwen1.5-MoE-A2.7B", device_map="auto", load_in_4bit=True, attn_implementation="flash_attention_2", ) input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device) generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0) self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist()) # Assisted generation assistant_model = model assistant_model.generation_config.num_assistant_tokens = 2 assistant_model.generation_config.num_assistant_tokens_schedule = "constant" generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0) self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist()) del assistant_model del model backend_empty_cache(torch_device) gc.collect() @slow @require_torch_sdpa def test_model_a2_7b_long_prompt_sdpa(self): EXPECTED_OUTPUT_TOKEN_IDS = [306, 338] # An input with 4097 tokens that is above the size of the sliding window input_ids = [1] + [306, 338] * 2048 model = Qwen2MoeForCausalLM.from_pretrained( "Qwen/Qwen1.5-MoE-A2.7B", device_map="auto", attn_implementation="sdpa", ) input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device) generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0) self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist()) # Assisted generation assistant_model = model assistant_model.generation_config.num_assistant_tokens = 2 assistant_model.generation_config.num_assistant_tokens_schedule = "constant" generated_ids = assistant_model.generate(input_ids, max_new_tokens=4, temperature=0) self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist()) del assistant_model backend_empty_cache(torch_device) gc.collect() EXPECTED_TEXT_COMPLETION = """To be or not to be, that is the question. This is the question that has been asked by many people over the""" prompt = "To be or not to" tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-MoE-A2.7B", use_fast=False) input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device) # greedy generation outputs generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) @slow def test_speculative_generation(self): EXPECTED_TEXT_COMPLETION = ( "To be or not to be, that is the question.\nThe answer is to be, of course. 
But what does it" ) prompt = "To be or not to" tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-MoE-A2.7B", use_fast=False) model = Qwen2MoeForCausalLM.from_pretrained( "Qwen/Qwen1.5-MoE-A2.7B", device_map="auto", torch_dtype=torch.float16 ) assistant_model = Qwen2MoeForCausalLM.from_pretrained( "Qwen/Qwen1.5-MoE-A2.7B", device_map="auto", torch_dtype=torch.float16 ) input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device) # greedy generation outputs set_seed(0) generated_ids = model.generate( input_ids, max_new_tokens=20, do_sample=True, temperature=0.3, assistant_model=assistant_model ) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) del model backend_empty_cache(torch_device) gc.collect()
transformers/tests/models/qwen2_moe/test_modeling_qwen2_moe.py/0
{ "file_path": "transformers/tests/models/qwen2_moe/test_modeling_qwen2_moe.py", "repo_id": "transformers", "token_count": 13786 }
447
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from unittest.util import safe_repr from transformers import AutoTokenizer, RwkvConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( RwkvForCausalLM, RwkvModel, ) from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_0 else: is_torch_greater_or_equal_than_2_0 = False class RwkvModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=False, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 def get_large_model_config(self): return RwkvConfig.from_pretrained("sgugger/rwkv-4-pile-7b") def prepare_config_and_inputs( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], 
self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config( gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) return ( config, input_ids, input_mask, None, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def get_config( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): return RwkvConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, intermediate_size=self.intermediate_size, activation_function=self.hidden_act, resid_pdrop=self.hidden_dropout_prob, attn_pdrop=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, input_mask, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_rwkv_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): config.output_hidden_states = True model = RwkvModel(config=config) model.to(torch_device) model.eval() result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.hidden_states), config.num_hidden_layers + 1) def create_and_check_causl_lm(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = RwkvForCausalLM(config) model.to(torch_device) model.eval() result = model(input_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_state_equivalency(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = RwkvModel(config=config) model.to(torch_device) model.eval() outputs = model(input_ids) output_whole = outputs.last_hidden_state outputs = model(input_ids[:, :2]) output_one = outputs.last_hidden_state # Using the state computed on the first inputs, we will get the same output outputs = model(input_ids[:, 2:], state=outputs.state) output_two = outputs.last_hidden_state self.parent.assertTrue(torch.allclose(torch.cat([output_one, output_two], dim=1), output_whole, atol=1e-5)) def create_and_check_forward_and_backwards( self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False ): model = RwkvForCausalLM(config) model.to(torch_device) if gradient_checkpointing: model.gradient_checkpointing_enable() result = model(input_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) 
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) result.loss.backward() def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids} return config, inputs_dict @unittest.skipIf( not is_torch_greater_or_equal_than_2_0, reason="See https://github.com/huggingface/transformers/pull/24204" ) @require_torch class RwkvModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (RwkvModel, RwkvForCausalLM) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": RwkvModel, "text-generation": RwkvForCausalLM} if is_torch_available() else {} ) all_generative_model_classes = (RwkvForCausalLM,) if is_torch_available() else () fx_compatible = False test_missing_keys = False test_model_parallel = False test_pruning = False test_head_masking = False # Rwkv does not support head masking def setUp(self): self.model_tester = RwkvModelTester(self) self.config_tester = ConfigTester( self, config_class=RwkvConfig, n_embd=37, common_properties=["hidden_size", "num_hidden_layers"] ) def assertInterval(self, member, container, msg=None): r""" Simple utility function to check if a member is inside an interval. """ if isinstance(member, torch.Tensor): max_value, min_value = member.max().item(), member.min().item() elif isinstance(member, list) or isinstance(member, tuple): max_value, min_value = max(member), min(member) if not isinstance(container, list): raise TypeError("container should be a list or tuple") elif len(container) != 2: raise ValueError("container should have 2 elements") expected_min, expected_max = container is_inside_interval = (min_value >= expected_min) and (max_value <= expected_max) if not is_inside_interval: standardMsg = "%s not found in %s" % (safe_repr(member), safe_repr(container)) self.fail(self._formatMessage(msg, standardMsg)) def test_config(self): self.config_tester.run_common_tests() def test_rwkv_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_rwkv_model(*config_and_inputs) def test_rwkv_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causl_lm(*config_and_inputs) def test_state_equivalency(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_state_equivalency(*config_and_inputs) def test_initialization(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config=config) for name, param in model.named_parameters(): if "time_decay" in name: if param.requires_grad: self.assertTrue(param.data.max().item() == 3.0) self.assertTrue(param.data.min().item() == -5.0) elif "time_first" in name: if param.requires_grad: # check if it's a ones like self.assertTrue(torch.allclose(param.data, torch.ones_like(param.data), atol=1e-5, rtol=1e-5)) elif any(x in name for x in ["time_mix_key", "time_mix_receptance"]): if param.requires_grad: self.assertInterval( param.data, [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) elif "time_mix_value" in name: if param.requires_grad: self.assertInterval( param.data, [0.0, 1.3], msg=f"Parameter {name} of 
model {model_class} seems not properly initialized", ) def test_attention_outputs(self): r""" Overriding the test_attention_outputs test as the attention outputs of Rwkv are different from other models it has a shape `batch_size, seq_len, hidden_size`. """ config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) batch_size = inputs["input_ids"].shape[0] with torch.no_grad(): outputs = model(**inputs) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) batch_size = inputs["input_ids"].shape[0] with torch.no_grad(): outputs = model(**inputs) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [batch_size, seq_len, config.hidden_size], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) batch_size = inputs["input_ids"].shape[0] with torch.no_grad(): outputs = model(**inputs) added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [batch_size, seq_len, config.hidden_size], ) @slow def test_model_from_pretrained(self): model_name = "RWKV/rwkv-4-169m-pile" model = RwkvModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_beam_sample_generate_dict_output(self): # This model has a custom attention output shape AND config flags, let's skip those checks old_has_attentions = self.has_attentions self.has_attentions = False super().test_beam_sample_generate_dict_output() self.has_attentions = old_has_attentions def test_beam_search_generate_dict_output(self): # This model has a custom attention output shape AND config flags, let's skip those checks old_has_attentions = self.has_attentions self.has_attentions = False super().test_beam_search_generate_dict_output() self.has_attentions = old_has_attentions def test_constrained_beam_search_generate_dict_output(self): # This model has a custom attention output shape AND config flags, let's skip those checks old_has_attentions = self.has_attentions self.has_attentions = False super().test_constrained_beam_search_generate_dict_output() self.has_attentions = old_has_attentions def test_greedy_generate_dict_outputs(self): # This model has a custom attention output shape AND config flags, let's skip those checks old_has_attentions = self.has_attentions self.has_attentions = False super().test_greedy_generate_dict_outputs() self.has_attentions = old_has_attentions def test_group_beam_search_generate_dict_output(self): # This model has a custom attention 
output shape AND config flags, let's skip those checks old_has_attentions = self.has_attentions self.has_attentions = False super().test_group_beam_search_generate_dict_output() self.has_attentions = old_has_attentions def test_sample_generate_dict_output(self): # This model has a custom attention output shape AND config flags, let's skip those checks old_has_attentions = self.has_attentions self.has_attentions = False super().test_sample_generate_dict_output() self.has_attentions = old_has_attentions @unittest.skip("This model doesn't support padding") def test_left_padding_compatibility(self): pass @unittest.skipIf( not is_torch_greater_or_equal_than_2_0, reason="See https://github.com/huggingface/transformers/pull/24204" ) @slow class RWKVIntegrationTests(unittest.TestCase): def setUp(self): self.model_id = "RWKV/rwkv-4-169m-pile" self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) def test_simple_generate(self): expected_output = "Hello my name is Jasmine and I am a newbie to the" model = RwkvForCausalLM.from_pretrained(self.model_id).to(torch_device) input_ids = self.tokenizer("Hello my name is", return_tensors="pt").input_ids.to(torch_device) output = model.generate(input_ids, max_new_tokens=10) output_sentence = self.tokenizer.decode(output[0].tolist()) self.assertEqual(output_sentence, expected_output) def test_simple_generate_bf16(self): expected_output = "Hello my name is Jasmine and I am a newbie to the" input_ids = self.tokenizer("Hello my name is", return_tensors="pt").input_ids.to(torch_device) model = RwkvForCausalLM.from_pretrained(self.model_id, torch_dtype=torch.bfloat16).to(torch_device) output = model.generate(input_ids, max_new_tokens=10) output_sentence = self.tokenizer.decode(output[0].tolist()) self.assertEqual(output_sentence, expected_output)
transformers/tests/models/rwkv/test_modeling_rwkv.py/0
{ "file_path": "transformers/tests/models/rwkv/test_modeling_rwkv.py", "repo_id": "transformers", "token_count": 9057 }
448
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Speech2Text model.""" import copy import inspect import os import tempfile import unittest from transformers import Speech2TextConfig from transformers.testing_utils import ( is_torch_available, require_sentencepiece, require_tokenizers, require_torch, require_torch_fp16, require_torchaudio, slow, torch_device, ) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import Speech2TextForConditionalGeneration, Speech2TextModel, Speech2TextProcessor from transformers.models.speech_to_text.modeling_speech_to_text import Speech2TextDecoder, Speech2TextEncoder def prepare_speech_to_text_inputs_dict( config, input_features, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_features.ne(0) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { # "input_ids": input_features, "input_features": input_features, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_torch class Speech2TextModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, num_conv_layers=2, conv_kernel_sizes=(5, 5), conv_channels=32, input_feat_per_channel=24, input_channels=1, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, max_source_positions=20, max_target_positions=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.num_conv_layers = num_conv_layers self.conv_kernel_sizes = 
conv_kernel_sizes self.conv_channels = conv_channels self.input_feat_per_channel = input_feat_per_channel self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.max_target_positions = max_target_positions self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_features = floats_tensor( [self.batch_size, self.seq_length, self.input_feat_per_channel], self.vocab_size ) attention_mask = torch.ones([self.batch_size, self.seq_length], dtype=torch.long, device=torch_device) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(2) config = self.get_config() inputs_dict = prepare_speech_to_text_inputs_dict( config, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, ) return config, inputs_dict def get_config(self): return Speech2TextConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, num_conv_layers=self.num_conv_layers, conv_kernel_sizes=self.conv_kernel_sizes, conv_channels=self.conv_channels, input_feat_per_channel=self.input_feat_per_channel, input_channels=self.input_channels, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, max_source_positions=self.max_source_positions, max_target_positions=self.max_target_positions, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_subsampled_output_lengths(self, input_lengths): """ Computes the output length of the convolutional layers """ for i in range(self.num_conv_layers): input_lengths = (input_lengths - 1) // 2 + 1 return input_lengths def create_and_check_model_forward(self, config, inputs_dict): model = Speech2TextModel(config=config).to(torch_device).eval() input_features = inputs_dict["input_features"] decoder_input_ids = inputs_dict["decoder_input_ids"] # first forward pass last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state self.parent.assertTrue(last_hidden_state.shape, (13, 7, 16)) def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = Speech2TextModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["decoder_input_ids"] attention_mask = inputs_dict["decoder_attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size).clamp(2) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = 
model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = Speech2TextModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = Speech2TextEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder( inputs_dict["input_features"], attention_mask=inputs_dict["attention_mask"] )[0] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = Speech2TextDecoder.from_pretrained(tmpdirname).to(torch_device) encoder_attention_mask = encoder._get_feature_vector_attention_mask( encoder_last_hidden_state.shape[1], inputs_dict["attention_mask"] ) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=encoder_attention_mask, )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class Speech2TextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Speech2TextModel, Speech2TextForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (Speech2TextForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( {"automatic-speech-recognition": Speech2TextForConditionalGeneration, "feature-extraction": Speech2TextModel} if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = True test_pruning = False test_missing_keys = False input_name = "input_features" def _get_input_ids_and_config(self, batch_size=2): config, input_ids, attention_mask = GenerationTesterMixin._get_input_ids_and_config(self) # `input_ids` is actually `input_features` which is a 3D tensor. # We must overwrite the mask to make it 2D since the original `_get_input_ids_and_config` creates an # attention mask of the same shape as `input_ids`. 
if len(attention_mask.shape) > 2: sequence_length = input_ids.shape[1] attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=attention_mask.device) return config, input_ids, attention_mask def setUp(self): self.model_tester = Speech2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=Speech2TextConfig) self.maxDiff = 3000 def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) @unittest.skip(reason="Not implemented currently") def test_inputs_embeds(self): pass @unittest.skip(reason="Training is not supported yet") def test_training(self): pass @unittest.skip def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_features = input_dict["input_features"] attention_mask = input_dict["attention_mask"] model = Speech2TextForConditionalGeneration(config).eval().to(torch_device) input_features = input_features.half() model.half() model.generate(input_features, attention_mask=attention_mask) model.generate(input_features, num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_features", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" in arg_names and "decoder_head_mask" in arg_names and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs =
model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length subsampled_seq_length = model._get_feat_extract_output_lengths(seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [subsampled_seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() subsampled_encoder_seq_length = model._get_feat_extract_output_lengths(encoder_seq_length) subsampled_encoder_key_length = model._get_feat_extract_output_lengths(encoder_key_length) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) out_len = len(outputs) correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned 
self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, subsampled_encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) def test_resize_tokens_embeddings(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: self.skipTest(reason="test_resize_embeddings is set to False") for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size # Retrieve the embeddings and clone them model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # make sure that decoder_input_ids are resized if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix.
models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_resize_embeddings_untied(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: self.skipTest(reason="test_resize_embeddings is set to False") original_config.tie_word_embeddings = False # if model cannot untied embeddings -> leave test if original_config.tie_word_embeddings: self.skipTest(reason="Model cannot untie embeddings") for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) @unittest.skip def test_generate_without_input_ids(self): pass @staticmethod def _get_encoder_outputs( model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 ): encoder = model.get_encoder() encoder_outputs = encoder( input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( num_interleave, dim=0 ) input_ids = input_ids[:, :, 0] generation_config = copy.deepcopy(model.generation_config) model._prepare_special_tokens(generation_config) input_ids = torch.zeros_like(input_ids[:, :1]) + generation_config.decoder_start_token_id attention_mask = None return encoder_outputs, input_ids, attention_mask def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): batch_size, seq_length = input_ids.shape[:2] subsampled_seq_length = self.model_tester.get_subsampled_output_lengths(seq_length) num_sequences_in_output = batch_size * num_return_sequences gen_len = ( output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - 
seq_length ) # scores self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config) # Attentions # encoder self._check_encoder_attention_for_generate( output.encoder_attentions, batch_size, config, subsampled_seq_length ) # decoder self._check_attentions_for_generate( num_sequences_in_output, output.decoder_attentions, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # Hidden States # encoder self._check_encoder_hidden_states_for_generate( output.encoder_hidden_states, batch_size, config, subsampled_seq_length ) # decoder self._check_hidden_states_for_generate( num_sequences_in_output, output.decoder_hidden_states, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to False") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) try: model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward input_features = inputs["input_features"] attention_mask = inputs["attention_mask"] decoder_input_ids = inputs["decoder_input_ids"] decoder_attention_mask = inputs["decoder_attention_mask"] traced_model = torch.jit.trace( model, (input_features, attention_mask, decoder_input_ids, decoder_attention_mask) ) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_pt_tf_model_equivalence(self, allow_missing_keys=True): # Allow missing keys since TF doesn't cache the sinusoidal embeddings in an attribute super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys) @unittest.skip(reason="Test failing, @RocketNight is looking into it") def test_tf_from_pt_safetensors(self): pass @require_torch @require_torchaudio @require_sentencepiece @require_tokenizers @slow class 
Speech2TextModelIntegrationTests(unittest.TestCase): @cached_property def default_processor(self): return Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr") def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_generation_librispeech(self): model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr") model.to(torch_device) processor = self.default_processor input_speech = self._load_datasamples(1) input_features = processor(input_speech, return_tensors="pt").input_features.to(torch_device) generated_ids = model.generate(input_features) generated_transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) EXPECTED_TRANSCRIPTIONS = [ "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel" ] self.assertListEqual(generated_transcript, EXPECTED_TRANSCRIPTIONS) def test_generation_librispeech_batched(self): model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr") model.to(torch_device) processor = self.default_processor input_speech = self._load_datasamples(4) inputs = processor(input_speech, return_tensors="pt", padding=True) input_features = inputs.input_features.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) generated_ids = model.generate(input_features, attention_mask=attention_mask) generated_transcripts = processor.batch_decode(generated_ids, skip_special_tokens=True) EXPECTED_TRANSCRIPTIONS = [ "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel", "nor is mister cultar's manner less interesting than his matter", "he tells us that at this festive season of the year with christmas and roast beef looming before us" " similes drawn from eating and its results occur most readily to the mind", "he has grave doubts whether sir frederick leyton's work is really greek after all and can discover in it" " but little of rocky ithaca", ] self.assertListEqual(generated_transcripts, EXPECTED_TRANSCRIPTIONS)
transformers/tests/models/speech_to_text/test_modeling_speech_to_text.py/0
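# Illustrative sketch, not part of the test file above: the core ASR flow the
# Speech2Text integration tests exercise, assuming the
# facebook/s2t-small-librispeech-asr checkpoint and the dummy LibriSpeech
# dataset are reachable.
import torch
from datasets import load_dataset
from transformers import Speech2TextForConditionalGeneration, Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")

ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio = ds.sort("id")[0]["audio"]["array"]

# featurize the waveform and decode the generated token ids back to text
input_features = processor(audio, sampling_rate=16_000, return_tensors="pt").input_features
with torch.no_grad():
    generated_ids = model.generate(input_features)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])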
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch TimeSformer model.""" import copy import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) if is_vision_available(): from transformers import VideoMAEImageProcessor class TimesformerModelTester: def __init__( self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_labels=10, initializer_range=0.02, attention_type="divided_space_time", scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.patch_size = patch_size self.num_frames = num_frames self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.attention_type = attention_type self.initializer_range = initializer_range self.scope = scope self.num_labels = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token self.num_patches_per_frame = (image_size // patch_size) ** 2 self.seq_length = (num_frames) * self.num_patches_per_frame + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): config = TimesformerConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, attention_type=self.attention_type, ) config.num_labels = self.num_labels return config def create_and_check_model(self, config, pixel_values, labels): model = TimesformerModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_video_classification(self, config, pixel_values, labels): model = TimesformerForVideoClassification(config) model.to(torch_device) model.eval() result = model(pixel_values) # verify the logits shape expected_shape = torch.Size((self.batch_size, self.num_labels)) self.parent.assertEqual(result.logits.shape, expected_shape) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as TimeSformer does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification} if is_torch_available() else {} ) test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = TimesformerModelTester(self) self.config_tester = ConfigTester( self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37 ) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING): inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="TimeSformer does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_video_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "facebook/timesformer-base-finetuned-k400" model = TimesformerModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model has no attentions") else: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: seq_len = self.model_tester.seq_length num_frames = self.model_tester.num_frames 
inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(hidden_states), expected_num_layers) seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # We will verify our results on a video of eating spaghetti # Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227] def prepare_video(): file = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" ) video = np.load(file) return list(video) @require_torch @require_vision class TimesformerModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]) if is_vision_available() else None ) @slow def test_inference_for_video_classification(self): model = 
TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
transformers/tests/models/timesformer/test_modeling_timesformer.py/0
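# Illustrative sketch, not part of the test file above: the video-classification
# inference path checked by the TimeSformer integration test, assuming the
# facebook/timesformer-base-finetuned-k400 checkpoint; the 8 frames here are
# random placeholders rather than the spaghetti video used in the test.
import numpy as np
import torch
from transformers import TimesformerForVideoClassification, VideoMAEImageProcessor

image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")

# a clip of 8 RGB frames, shaped (height, width, channels) per frame
video = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(8)]
inputs = image_processor(video, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])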
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class FlaxViTModelTester(unittest.TestCase): def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, attn_implementation="eager", ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.attn_implementation = attn_implementation # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = ViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, attn_implementation=self.attn_implementation, ) return config, pixel_values def create_and_check_model(self, config, pixel_values): model = FlaxViTModel(config=config) result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) def create_and_check_for_image_classification(self, config, pixel_values): config.num_labels = self.type_sequence_label_size 
model = FlaxViTForImageClassification(config=config) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images config.num_channels = 1 model = FlaxViTForImageClassification(config) pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, ) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def setUp(self) -> None: self.model_tester = FlaxViTModelTester(self) self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) # We need to override this test because ViT's forward signature is different than text models. def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) # We need to override this test because ViT expects pixel_values instead of input_ids def test_jit_compilation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def model_jitted(pixel_values, **kwargs): return model(pixel_values=pixel_values, **kwargs) with self.subTest("JIT Enabled"): jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = model_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("google/vit-base-patch16-224") outputs = model(np.ones((1, 3, 224, 224))) self.assertIsNotNone(outputs)
transformers/tests/models/vit/test_modeling_flax_vit.py/0
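# Illustrative sketch, not part of the test file above: the Flax ViT
# classification call these tests cover, assuming the google/vit-base-patch16-224
# checkpoint; the all-ones input mirrors the from_pretrained test rather than a
# real image.
import numpy as np
from transformers import FlaxViTForImageClassification

model = FlaxViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
logits = model(np.ones((1, 3, 224, 224), dtype=np.float32)).logits
print(model.config.id2label[int(logits.argmax(-1)[0])])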
# coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the Wav2Vec2Phoneme tokenizer.""" import json import os import unittest from typing import Tuple from transformers import Wav2Vec2PhonemeCTCTokenizer from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "facebook/wav2vec2-lv-60-espeak-cv-ft" tokenizer_class = Wav2Vec2PhonemeCTCTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() vocab = ( "<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː " "ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː " "ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 " "oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ " "pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ " "yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ " 'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ ' "ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ " "ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ " "uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. 
oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ " "ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ " "ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ " "ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4" ).split(" ") vocab_tokens = dict(zip(vocab, range(len(vocab)))) self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") # overwrite since phonemes require specific creation def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]: toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))] toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks)) if max_length is not None and len(toks) > max_length: toks = toks[:max_length] if min_length is not None and len(toks) < min_length and len(toks) > 0: while len(toks) < min_length: toks = toks + toks # toks_str = [t[1] for t in toks] toks_ids = [t[0] for t in toks] # Ensure consistency output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False) if " " not in output_txt and len(toks_ids) > 1: output_txt = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False) + " " + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False) ) if with_prefix_space: output_txt = " " + output_txt output_ids = tokenizer.encode(output_txt, add_special_tokens=False) return output_txt, output_ids def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs) def test_tokenizer_add_new_tokens(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") # check adding a single token tokenizer.add_tokens("xxx") token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids self.assertEqual(token_ids, [13, 392, 17]) # xxx should be last token tokenizer.add_tokens(["aaa", "bbb", "ccc"]) token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids self.assertEqual(token_ids, [13, 393, 17, 395]) # aaa and ccc should be after xxx and 2 after aaa token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids self.assertEqual(token_ids, [3, 200]) # mai should be <unk> (=3) def test_phonemize(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") input_text = "Hello how are you" phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us") self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː") def test_encode(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") input_text = "Hello how are you" phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us") self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids) def test_encode_decode(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") input_text = "Hello how are you" phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us") phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids) self.assertEqual(phonemes, phonemes_enc_dec) def test_decode(self): tokenizer = 
self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") sample_ids = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] tokens = tokenizer.decode(sample_ids[0]) batch_tokens = tokenizer.batch_decode(sample_ids) self.assertEqual(tokens, batch_tokens[0]) self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"]) def test_phonemize_with_word_del(self): tokenizer = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" ) tokenizer.add_tokens("|") input_text = "Hello how are you" phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us") self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |") def test_encode_with_del(self): tokenizer = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" ) tokenizer.add_tokens("|") input_text = "Hello how are you" phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us") self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids) def test_decode_with_del(self): tokenizer = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" ) tokenizer.add_tokens("|") # fmt: off sample_ids = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter tokens = tokenizer.decode(sample_ids[0]) batch_tokens = tokenizer.batch_decode(sample_ids) self.assertEqual(tokens, batch_tokens[0]) self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"]) # decode with no word_del_token filter tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False) batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False) self.assertEqual(tokens, batch_tokens[0]) self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"]) def test_encode_decode_with_del(self): tokenizer = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" ) tokenizer.add_tokens("|") input_text = "Hello how are you" phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us") phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False) self.assertEqual(phonemes, phonemes_enc_dec) def test_encode_decode_with_del_filter(self): tokenizer = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" ) tokenizer.add_tokens("|") input_text = "Hello how are you" phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us") phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True) self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec) def test_change_phonemizer_lang(self): tokenizer = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None ) input_text = "Hello how are you" input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids self.assertNotEqual(input_ids_en, input_ids_fr) text_en = tokenizer.decode(input_ids_en) text_fr = tokenizer.decode(input_ids_fr) self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː") self.assertEqual(text_fr, "ɛ l o h aʊ a 
ʁ j u") def test_case_insensitive(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") input_text_up = "Hello how Are you" input_text_low = "hello how are you" input_ids_up = tokenizer(input_text_up).input_ids input_ids_low = tokenizer(input_text_low).input_ids self.assertEqual(input_ids_up, input_ids_low) def test_tokenizer_decode_added_tokens(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") tokenizer.add_tokens(["!", "?"]) tokenizer.add_special_tokens({"cls_token": "$$$"}) # fmt: off sample_ids = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394], ] # fmt: on batch_tokens = tokenizer.batch_decode(sample_ids) self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"]) @staticmethod def get_from_offsets(offsets, key): retrieved_list = [d[key] for d in offsets] return retrieved_list def test_offsets(self): tokenizer = self.get_tokenizer(word_delimiter_token="|") tokenizer.add_tokens("|") # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys()), 2) self.assertTrue("text" in outputs) self.assertTrue("char_offsets" in outputs) self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput)) # check that order of chars is correct and identical for both outputs self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text) self.assertListEqual( self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17] ) def test_offsets_batch(self): tokenizer = self.get_tokenizer(word_delimiter_token="|") def check_list_tuples_equal(outputs_batch, outputs_list): self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput)) self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput)) # transform list to ModelOutput outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"]) def recursive_check(list_or_dict_1, list_or_dict_2): if isinstance(list_or_dict_1, list): [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)] self.assertEqual(list_or_dict_1, list_or_dict_2) if "char_offsets" in outputs_batch: recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"]) # fmt: off sample_ids = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, 
tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. All we will check now is # the output type is correct and the output is identical to `decode` # char outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True) outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids] check_list_tuples_equal(outputs_char_batch, outputs_char) @unittest.skip(reason="Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes") def test_added_tokens_do_lower_case(self): pass @unittest.skip(reason="Wav2Vec2PhonemeTokenizer always puts spaces between phonemes") def test_encode_decode_with_spaces(self): pass @unittest.skip( reason="encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" ) def test_internal_consistency(self): pass @unittest.skip(reason="Wav2Vec2PhonemeModel has no max model length => no testing") def test_add_tokens_tokenizer(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): vocab_size = tokenizer.vocab_size all_size = len(tokenizer) self.assertNotEqual(vocab_size, 0) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"] added_toks = tokenizer.add_tokens(new_toks) vocab_size_2 = tokenizer.vocab_size all_size_2 = len(tokenizer) self.assertNotEqual(vocab_size_2, 0) self.assertEqual(vocab_size, vocab_size_2) self.assertEqual(added_toks, len(new_toks)) self.assertEqual(all_size_2, all_size + len(new_toks)) tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False) self.assertGreaterEqual(len(tokens), 4) self.assertGreater(tokens[0], tokenizer.vocab_size - 1) self.assertGreater(tokens[-3], tokenizer.vocab_size - 1) new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} added_toks_2 = tokenizer.add_special_tokens(new_toks_2) vocab_size_3 = tokenizer.vocab_size all_size_3 = len(tokenizer) self.assertNotEqual(vocab_size_3, 0) self.assertEqual(vocab_size, vocab_size_3) self.assertEqual(added_toks_2, len(new_toks_2)) self.assertEqual(all_size_3, all_size_2 + len(new_toks_2)) tokens = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False ) self.assertGreaterEqual(len(tokens), 6) self.assertGreater(tokens[0], tokenizer.vocab_size - 1) self.assertGreater(tokens[0], tokens[1]) self.assertGreater(tokens[-3], tokenizer.vocab_size - 1) self.assertGreater(tokens[-3], tokens[-4]) self.assertEqual(tokens[0], tokenizer.eos_token_id) self.assertEqual(tokens[-3], tokenizer.pad_token_id) @unittest.skip(reason="The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.") def test_tf_encode_plus_sent_to_model(self): pass @unittest.skip(reason="The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.") def test_torch_encode_plus_sent_to_model(self): pass def test_convert_tokens_to_string_format(self): # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which # is not the case for Wav2Vec2PhonemeCTCTokenizer. 
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(output["text"], str)
transformers/tests/models/wav2vec2_phoneme/test_tokenization_wav2vec2_phoneme.py/0
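# Illustrative sketch, not part of the test file above: the phonemize/encode/decode
# round trip these tokenizer tests exercise, assuming the
# facebook/wav2vec2-lv-60-espeak-cv-ft checkpoint and an installed espeak backend
# for phonemizer.
from transformers import Wav2Vec2PhonemeCTCTokenizer

tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
phonemes = tokenizer.phonemize("Hello how are you", phonemizer_lang="en-us")  # "h ə l oʊ h aʊ ɑːɹ j uː"
input_ids = tokenizer(phonemes, do_phonemize=False).input_ids
print(tokenizer.decode(input_ids))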
# coding=utf-8 # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class TFXGLMModelTester: config_cls = XGLMConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = d_model self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.ffn_dim = ffn_dim self.activation_function = activation_function self.activation_dropout = activation_dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = None self.bos_token_id = 0 self.eos_token_id = 2 self.pad_token_id = 1 def get_large_model_config(self): return XGLMConfig.from_pretrained("facebook/xglm-564M") def prepare_config_and_inputs(self): input_ids = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3 ) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, ) def get_config(self): return XGLMConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "head_mask": head_mask, } 
return config, inputs_dict @require_tf class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else () pipeline_model_mapping = ( {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {} ) test_onnx = False test_missing_keys = False test_pruning = False def setUp(self): self.model_tester = TFXGLMModelTester(self) self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() @slow def test_model_from_pretrained(self): model_name = "facebook/xglm-564M" model = TFXGLMModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.") def test_resize_token_embeddings(self): super().test_resize_token_embeddings() @require_tf class TFXGLMModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_xglm(self, verify_outputs=True): model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M") input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581] # fmt: skip output_ids = model.generate(input_ids, do_sample=False, num_beams=1) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids) @slow def test_xglm_sample(self): tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M") model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M") tf.random.set_seed(0) tokenized = tokenizer("Today is a nice day and", return_tensors="tf") input_ids = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(":/CPU:0"): output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0]) output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True) EXPECTED_OUTPUT_STR = ( "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due" ) self.assertEqual(output_str, EXPECTED_OUTPUT_STR) @slow def test_batch_generation(self): model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M") tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M") tokenizer.padding_side = "left" # use different length sentences to test batching sentences = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. 
When", "Hello, my dog is a little", ] inputs = tokenizer(sentences, return_tensors="tf", padding=True) input_ids = inputs["input_ids"] outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12) inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12) inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12) batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) expected_output_sentence = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be " "a single", "Hello, my dog is a little bit of a shy one, but he is very friendly", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
transformers/tests/models/xglm/test_modeling_tf_xglm.py/0
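# Illustrative sketch, not part of the test file above: the greedy TF XGLM
# generation checked in TFXGLMModelLanguageGenerationTest, assuming the
# facebook/xglm-564M checkpoint.
from transformers import TFXGLMForCausalLM, XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

input_ids = tokenizer("The dog", return_tensors="tf").input_ids
# greedy decoding, as in the expected_output_ids check above
output_ids = model.generate(input_ids, do_sample=False, num_beams=1, max_new_tokens=17)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))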
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, SummarizationPipeline, TFPreTrainedModel, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch, slow, torch_device from transformers.tokenization_utils import TruncationStrategy from .test_pipelines_common import ANY @is_pipeline_test class SummarizationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def get_test_pipeline(self, model, tokenizer, processor, torch_dtype="float32"): summarizer = SummarizationPipeline(model=model, tokenizer=tokenizer, torch_dtype=torch_dtype) return summarizer, ["(CNN)The Palestinian Authority officially became", "Some other text"] def run_pipeline_test(self, summarizer, _): model = summarizer.model outputs = summarizer("(CNN)The Palestinian Authority officially became") self.assertEqual(outputs, [{"summary_text": ANY(str)}]) outputs = summarizer( "(CNN)The Palestinian Authority officially became ", num_beams=2, min_length=2, max_length=5, ) self.assertEqual(outputs, [{"summary_text": ANY(str)}]) # Some models (Switch Transformers, LED, T5, LongT5, etc) can handle long sequences. model_can_handle_longer_seq = [ "SwitchTransformersConfig", "T5Config", "LongT5Config", "LEDConfig", "PegasusXConfig", "FSMTConfig", "M2M100Config", "ProphetNetConfig", # positional embeddings up to a fixed maximum size (otherwise clamping the values) ] if model.config.__class__.__name__ not in model_can_handle_longer_seq: # Too long and exception is expected. # For TF models, if the weights are initialized in GPU context, we won't get expected index error from # the embedding layer. 
if not ( isinstance(model, TFPreTrainedModel) and len(summarizer.model.trainable_weights) > 0 and "GPU" in summarizer.model.trainable_weights[0].device ): with self.assertRaises(Exception): outputs = summarizer("This " * 1000) outputs = summarizer("This " * 1000, truncation=TruncationStrategy.ONLY_FIRST) @require_torch def test_small_model_pt(self): summarizer = pipeline(task="summarization", model="sshleifer/tiny-mbart", framework="pt") outputs = summarizer("This is a small test") self.assertEqual( outputs, [ { "summary_text": "เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไป" } ], ) @require_tf def test_small_model_tf(self): summarizer = pipeline(task="summarization", model="sshleifer/tiny-mbart", framework="tf") outputs = summarizer("This is a small test") self.assertEqual( outputs, [ { "summary_text": "เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไป" } ], ) @require_torch @slow def test_integration_torch_summarization(self): summarizer = pipeline(task="summarization", device=torch_device) cnn_article = ( " (CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." 
In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder" " and Faith Karimi contributed to this report." ) expected_cnn_summary = ( " The Palestinian Authority becomes the 123rd member of the International Criminal Court . The move gives" " the court jurisdiction over alleged crimes in Palestinian territories . Israel and the United States" " opposed the Palestinians' efforts to join the court . Rights group Human Rights Watch welcomes the move," " says governments seeking to penalize Palestine should end pressure ." ) result = summarizer(cnn_article) self.assertEqual(result[0]["summary_text"], expected_cnn_summary)
transformers/tests/pipelines/test_pipelines_summarization.py/0
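# Illustrative sketch, not part of the test file above: the pipeline call pattern
# the summarization tests use; sshleifer/tiny-mbart is a tiny checkpoint meant
# only for smoke testing, so its "summary" is nonsense text.
from transformers import pipeline

summarizer = pipeline(task="summarization", model="sshleifer/tiny-mbart", framework="pt")
print(summarizer("This is a small test", num_beams=2, min_length=2, max_length=5))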
# coding=utf-8 # Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, QuantoConfig from transformers.testing_utils import ( require_accelerate, require_quanto, require_read_token, require_torch_gpu, slow, torch_device, ) from transformers.utils import is_accelerate_available, is_quanto_available, is_torch_available if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaTokenizer if is_accelerate_available(): from accelerate import init_empty_weights if is_quanto_available(): from quanto import QLayerNorm, QLinear from transformers.integrations.quanto import replace_with_quanto_layers class QuantoConfigTest(unittest.TestCase): def test_attributes(self): pass @require_quanto @require_accelerate class QuantoTestIntegration(unittest.TestCase): model_id = "facebook/opt-350m" def setUp(self): config = AutoConfig.from_pretrained(self.model_id) with init_empty_weights(): self.model = AutoModelForCausalLM.from_config(config) self.nb_linear = 0 self.nb_layernorm = 0 for module in self.model.modules(): if isinstance(module, torch.nn.Linear): self.nb_linear += 1 elif isinstance(module, torch.nn.LayerNorm): self.nb_layernorm += 1 def test_weight_only_quantization_conversion(self): """ Simple test that checks if the quantized model has been converted properly when using weight only quantization """ # Try with weight only quantization quantization_config = QuantoConfig(weights="int8", activations=None) self.model, _ = replace_with_quanto_layers(self.model, quantization_config=quantization_config) nb_qlinear = 0 for module in self.model.modules(): if isinstance(module, QLinear): nb_qlinear += 1 self.assertEqual(self.nb_linear, nb_qlinear) def test_weight_and_activation_quantization_conversion(self): """ Simple test that checks if the quantized model has been converted properly when using weight + activation quantization """ # Try with weight + activation quantization quantization_config = QuantoConfig(weights="int8", activations="int8") self.model, _ = replace_with_quanto_layers(self.model, quantization_config=quantization_config) nb_qlinear = 0 nb_qlayernorm = 0 for module in self.model.modules(): if isinstance(module, QLinear): nb_qlinear += 1 if isinstance(module, QLayerNorm): nb_qlayernorm += 1 self.assertEqual(self.nb_linear, nb_qlinear) self.assertEqual(self.nb_layernorm, nb_qlayernorm) def test_conversion_with_modules_to_not_convert(self): """ Simple test that checks if the quantized model has been converted properly when specifying modules_to_not_convert argument """ # Try with weight + activatioin quantization quantization_config = QuantoConfig(weights="int8", activations="int8") self.model, _ = replace_with_quanto_layers( self.model, quantization_config=quantization_config, modules_to_not_convert=["lm_head"] ) nb_qlinear = 0 nb_qlayernorm = 0 for module in self.model.modules(): if isinstance(module, QLinear): nb_qlinear += 1 
if isinstance(module, QLayerNorm): nb_qlayernorm += 1 self.assertEqual(self.nb_linear - 1, nb_qlinear) @slow @require_torch_gpu @require_quanto @require_accelerate class QuantoQuantizationTest(unittest.TestCase): """ Test 8-bit weights only quantization """ model_name = "bigscience/bloom-560m" weights = "int8" activations = None device_map = "cpu" input_text = "Hello my name is" EXPECTED_OUTPUTS = "Hello my name is John, I am a professional photographer and I" def setUp(self): """ Setup quantized model """ quantization_config = QuantoConfig( weights=self.weights, activations=self.activations, ) self.quantized_model = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=self.device_map, quantization_config=quantization_config, torch_dtype=torch.float32, ) self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) self.have_accelerate_hooks = ( getattr(self.quantized_model, "hf_device_map", False) and len(self.quantized_model.hf_device_map) > 1 ) def check_inference_correctness(self, model, device): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ if not self.have_accelerate_hooks: model.to(device) encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(device), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality_cpu(self): """ Simple test to check the quality of the model on cpu by comparing the generated tokens with the expected tokens """ self.check_inference_correctness(self.quantized_model, "cpu") def test_generate_quality_cuda(self): """ Simple test to check the quality of the model on cuda by comparing the generated tokens with the expected tokens """ self.check_inference_correctness(self.quantized_model, "cuda") def test_quantized_model_layers(self): from quanto import QBitsTensor, QModuleMixin, QTensor """ Suite of simple test to check if the layers are quantized and are working properly """ # Test the type of the quantized layer self.assertTrue(isinstance(self.quantized_model.transformer.h[0].self_attention.query_key_value, QModuleMixin)) self.assertTrue( isinstance(self.quantized_model.transformer.h[0].self_attention.query_key_value.weight, QTensor) ) if self.weights == "int4": self.assertTrue( isinstance(self.quantized_model.transformer.h[0].self_attention.query_key_value.weight, QBitsTensor) ) # check that the lm_head was indeed not quantized, just like bnb self.assertTrue( isinstance(self.quantized_model.lm_head, torch.nn.Linear) and not isinstance(self.quantized_model.lm_head, QModuleMixin) ) if self.device_map in ["cpu", "cuda"]: self.assertEqual( self.quantized_model.transformer.h[0].self_attention.query_key_value.weight._data.device.type, self.device_map, ) self.quantized_model.to(0) self.assertEqual( self.quantized_model.transformer.h[0].self_attention.query_key_value.weight._data.device.type, "cuda" ) def test_serialization_bin(self): """ Test the serialization, the loading and the inference of the quantized weights """ with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(ValueError) as e: self.quantized_model.save_pretrained(tmpdirname, safe_serialization=False) self.assertIn("The model is quantized 
with quanto and is not serializable", str(e.exception)) # TODO: replace by the following when it works # quantized_model_from_saved = AutoModelForCausalLM.from_pretrained( # tmpdirname, torch_dtype=torch.float32, device_map="cpu" # ) # self.check_inference_correctness(quantized_model_from_saved, device="cuda") def test_serialization_safetensors(self): """ Test the serialization, the loading and the inference of the quantized weights """ with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(ValueError) as e: self.quantized_model.save_pretrained(tmpdirname) self.assertIn("The model is quantized with quanto and is not serializable", str(e.exception)) # quantized_model_from_saved = AutoModelForCausalLM.from_pretrained( # tmpdirname, torch_dtype=torch.float32, device_map="cpu" # ) # self.check_inference_correctness(quantized_model_from_saved, device="cuda") def check_same_model(self, model1, model2): d0 = dict(model1.named_parameters()) d1 = dict(model2.named_parameters()) self.assertTrue(d0.keys() == d1.keys()) for k in d0.keys(): self.assertTrue(d0[k].shape == d1[k].shape) self.assertTrue(d0[k].device.type == d1[k].device.type) self.assertTrue(d0[k].device == d1[k].device) self.assertTrue(d0[k].dtype == d1[k].dtype) self.assertTrue(torch.equal(d0[k], d1[k].to(d0[k].device))) def test_compare_with_quanto(self): from quanto import freeze, qint4, qint8, quantize w_mapping = {"int8": qint8, "int4": qint4} model = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=self.device_map, torch_dtype=torch.float32, ) # we do not quantize the lm_head since we don't do that in transformers quantize(model.transformer, weights=w_mapping[self.weights]) freeze(model.transformer) self.check_same_model(model, self.quantized_model) self.check_inference_correctness(model, device="cuda") @unittest.skip def test_load_from_quanto_saved(self): from quanto import freeze, qint4, qint8, quantize from transformers import QuantoConfig w_mapping = {"int8": qint8, "int4": qint4} model = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=self.device_map, torch_dtype=torch.float32, ) # we do not quantize the lm_head since we don't do that in transformers quantize(model.transformer, weights=w_mapping[self.weights]) freeze(model.transformer) with tempfile.TemporaryDirectory() as tmpdirname: model.config.quantization_config = QuantoConfig( weights=self.weights, activations=self.activations, modules_to_not_convert=["lm_head"] ) model.save_pretrained(tmpdirname, safe_serialization=False) quantized_model_from_saved = AutoModelForCausalLM.from_pretrained( tmpdirname, device_map=self.device_map, torch_dtype=torch.float32, ) self.check_same_model(model, quantized_model_from_saved) self.check_inference_correctness(quantized_model_from_saved, device="cuda") class QuantoQuantizationOffloadTest(QuantoQuantizationTest): device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "transformer.ln_f": 0, "transformer.h.0": 0, "transformer.h.1": 0, "transformer.h.2": 0, "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 0, "transformer.h.10": 0, "transformer.h.11": 0, "transformer.h.12": 0, "transformer.h.13": 0, "transformer.h.14": 0, "transformer.h.15": 0, "transformer.h.16": 0, "transformer.h.17": 0, "transformer.h.18": 0, "transformer.h.19": 0, "transformer.h.20": 0, "transformer.h.21": 0, "transformer.h.22": "cpu", "transformer.h.23": "disk", "lm_head": 0, } 
@unittest.skip(reason="The execution device is a gpu") def test_generate_quality_cpu(self): pass @unittest.skip(reason="We can't save offloaded values") def test_serialization_bin(self): pass @unittest.skip def test_serialization_safetensors(self): pass @unittest.skip def test_compare_with_quanto(self): pass @unittest.skip def test_load_from_quanto_saved(self): pass def test_check_offload_quantized(self): """ We check that we have unquantized value in the cpu and in the disk """ import quanto cpu_weights = self.quantized_model.transformer.h[22].self_attention.query_key_value._hf_hook.weights_map[ "weight" ] disk_weights = self.quantized_model.transformer.h[23].self_attention.query_key_value._hf_hook.weights_map[ "weight" ] self.assertTrue(isinstance(cpu_weights, torch.Tensor) and not isinstance(cpu_weights, quanto.QTensor)) self.assertTrue(isinstance(disk_weights, torch.Tensor) and not isinstance(disk_weights, quanto.QTensor)) if self.weights == "int4": self.assertTrue(isinstance(cpu_weights, torch.Tensor) and not isinstance(disk_weights, quanto.QBitsTensor)) self.assertTrue( isinstance(disk_weights, torch.Tensor) and not isinstance(disk_weights, quanto.QBitsTensor) ) @unittest.skip(reason="Skipping test class because serialization is not supported yet") class QuantoQuantizationSerializationTest(QuantoQuantizationTest): """ Perform the same tests as in QuantoQuantizationTest but with a serialized model. """ def setUp(self): """ Setup quantized model """ quantization_config = QuantoConfig( weights=self.weights, activations=self.activations, ) quantized_model = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=self.device_map, quantization_config=quantization_config, torch_dtype=torch.float32, ) with tempfile.TemporaryDirectory() as tmpdirname: quantized_model.save_pretrained(tmpdirname, safe_serialization=False) self.quantized_model = AutoModelForCausalLM.from_pretrained( tmpdirname, torch_dtype=torch.float32, device_map=self.device_map ) self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) self.have_accelerate_hooks = ( getattr(self.quantized_model, "hf_device_map", False) and len(self.quantized_model.hf_device_map) > 1 ) @unittest.skip(reason="Skipping test class because serialization is not supported yet") class QuantoQuantizationSerializationCudaTest(QuantoQuantizationTest): """ Perform the same tests as in QuantoQuantizationTest but with model on cuda """ device_map = "cuda:0" class QuantoQuantizationQBitsTensorTest(QuantoQuantizationTest): EXPECTED_OUTPUTS = "Hello my name is Nils, I am a student of the University" weights = "int4" class QuantoQuantizationQBitsTensorOffloadTest(QuantoQuantizationOffloadTest): EXPECTED_OUTPUTS = "Hello my name is Nils, I am a student of the University" weights = "int4" @unittest.skip(reason="Skipping test class because serialization is not supported yet") class QuantoQuantizationQBitsTensorSerializationTest(QuantoQuantizationSerializationTest): EXPECTED_OUTPUTS = "Hello my name is Nils, I am a student of the University" weights = "int4" @require_torch_gpu class QuantoQuantizationActivationTest(unittest.TestCase): def test_quantize_activation(self): quantization_config = QuantoConfig( weights="int8", activations="int8", ) with self.assertRaises(ValueError) as e: AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m", quantization_config=quantization_config) self.assertIn("We don't support quantizing the activations with transformers library", str(e.exception)) @require_quanto @require_torch_gpu class 
QuantoKVCacheQuantizationTest(unittest.TestCase): @slow @require_read_token def test_quantized_cache(self): EXPECTED_TEXT_COMPLETION = [ "Simply put, the theory of relativity states that 1) the speed of light is the same for all observers, and 2) the laws of physics are the same for all observers.\nThe first part of the theory of relativity", "My favorite all time favorite condiment is ketchup. I love it on everything. I love it on my eggs, my fries, my chicken, my burgers, my hot dogs, my sandwiches, my salads, my p", ] prompts = [ "Simply put, the theory of relativity states that ", "My favorite all time favorite condiment is ketchup.", ] tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", pad_token="</s>", padding_side="left") model = LlamaForCausalLM.from_pretrained( "meta-llama/Llama-2-7b-hf", device_map="sequential", torch_dtype=torch.float16 ) inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(torch_device) generated_ids = model.generate(**inputs, max_new_tokens=40, do_sample=False, cache_implementation="quantized") text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
transformers/tests/quantization/quanto_integration/test_quanto.py/0
{ "file_path": "transformers/tests/quantization/quanto_integration/test_quanto.py", "repo_id": "transformers", "token_count": 7770 }
455
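For context, the quantization path these tests exercise can be reproduced outside the test harness roughly as follows. This is a minimal sketch based on the `setUp` above; it assumes the `quanto` and `accelerate` packages are installed and reuses the same bloom-560m checkpoint, dtype and prompt as the tests.

```python
# Minimal sketch of the weight-only quanto flow exercised by QuantoQuantizationTest.
# Model id, device_map, dtype and prompt mirror the test setup; adjust as needed.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, QuantoConfig

quantization_config = QuantoConfig(weights="int8", activations=None)
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-560m",
    device_map="cpu",
    quantization_config=quantization_config,
    torch_dtype=torch.float32,
)
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")

inputs = tokenizer("Hello my name is", return_tensors="pt")
output = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```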
import argparse import logging import os import sys import time import tensorflow as tf from datasets import load_dataset from tqdm import tqdm from transformers import AutoTokenizer, TFAutoModelForSequenceClassification from transformers.modeling_tf_utils import keras from transformers.utils import is_sagemaker_dp_enabled if os.environ.get("SDP_ENABLED") or is_sagemaker_dp_enabled(): SDP_ENABLED = True os.environ["SAGEMAKER_INSTANCE_TYPE"] = "p3dn.24xlarge" import smdistributed.dataparallel.tensorflow as sdp else: SDP_ENABLED = False def fit(model, loss, opt, train_dataset, epochs, train_batch_size, max_steps=None): pbar = tqdm(train_dataset) for i, batch in enumerate(pbar): with tf.GradientTape() as tape: inputs, targets = batch outputs = model(batch) loss_value = loss(targets, outputs.logits) if SDP_ENABLED: tape = sdp.DistributedGradientTape(tape, sparse_as_dense=True) grads = tape.gradient(loss_value, model.trainable_variables) opt.apply_gradients(zip(grads, model.trainable_variables)) pbar.set_description(f"Loss: {loss_value:.4f}") if SDP_ENABLED and i == 0: sdp.broadcast_variables(model.variables, root_rank=0) sdp.broadcast_variables(opt.variables(), root_rank=0) if max_steps and i >= max_steps: break train_results = {"loss": loss_value.numpy()} return train_results def get_datasets(tokenizer, train_batch_size, eval_batch_size): # Load dataset train_dataset, test_dataset = load_dataset("stanfordnlp/imdb", split=["train", "test"]) # Preprocess train dataset train_dataset = train_dataset.map( lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True ) train_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"]) train_features = { x: train_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length]) for x in ["input_ids", "attention_mask"] } tf_train_dataset = tf.data.Dataset.from_tensor_slices((train_features, train_dataset["label"])) # Preprocess test dataset test_dataset = test_dataset.map( lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True ) test_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"]) test_features = { x: test_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length]) for x in ["input_ids", "attention_mask"] } tf_test_dataset = tf.data.Dataset.from_tensor_slices((test_features, test_dataset["label"])) if SDP_ENABLED: tf_train_dataset = tf_train_dataset.shard(sdp.size(), sdp.rank()) tf_test_dataset = tf_test_dataset.shard(sdp.size(), sdp.rank()) tf_train_dataset = tf_train_dataset.batch(train_batch_size, drop_remainder=True) tf_test_dataset = tf_test_dataset.batch(eval_batch_size, drop_remainder=True) return tf_train_dataset, tf_test_dataset if __name__ == "__main__": parser = argparse.ArgumentParser() # Hyperparameters sent by the client are passed as command-line arguments to the script. 
parser.add_argument("--epochs", type=int, default=3) parser.add_argument("--per_device_train_batch_size", type=int, default=16) parser.add_argument("--per_device_eval_batch_size", type=int, default=8) parser.add_argument("--model_name_or_path", type=str) parser.add_argument("--learning_rate", type=str, default=5e-5) parser.add_argument("--do_train", type=bool, default=True) parser.add_argument("--do_eval", type=bool, default=True) parser.add_argument("--output_dir", type=str) parser.add_argument("--max_steps", type=int, default=None) # Data, model, and output directories parser.add_argument("--output_data_dir", type=str, default=os.environ["SM_OUTPUT_DATA_DIR"]) parser.add_argument("--model_dir", type=str, default=os.environ["SM_MODEL_DIR"]) parser.add_argument("--n_gpus", type=str, default=os.environ["SM_NUM_GPUS"]) args, _ = parser.parse_known_args() # Set up logging logger = logging.getLogger(__name__) logging.basicConfig( level=logging.getLevelName("INFO"), handlers=[logging.StreamHandler(sys.stdout)], format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) if SDP_ENABLED: sdp.init() gpus = tf.config.experimental.list_physical_devices("GPU") for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) if gpus: tf.config.experimental.set_visible_devices(gpus[sdp.local_rank()], "GPU") # Load model and tokenizer model = TFAutoModelForSequenceClassification.from_pretrained(args.model_name_or_path) tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) # get datasets tf_train_dataset, tf_test_dataset = get_datasets( tokenizer=tokenizer, train_batch_size=args.per_device_train_batch_size, eval_batch_size=args.per_device_eval_batch_size, ) # fine optimizer and loss optimizer = keras.optimizers.Adam(learning_rate=args.learning_rate) loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True) metrics = [keras.metrics.SparseCategoricalAccuracy()] model.compile(optimizer=optimizer, loss=loss, metrics=metrics) # Training if args.do_train: # train_results = model.fit(tf_train_dataset, epochs=args.epochs, batch_size=args.train_batch_size) start_train_time = time.time() train_results = fit( model, loss, optimizer, tf_train_dataset, args.epochs, args.per_device_train_batch_size, max_steps=args.max_steps, ) end_train_time = time.time() - start_train_time logger.info("*** Train ***") logger.info(f"train_runtime = {end_train_time}") output_eval_file = os.path.join(args.output_dir, "train_results.txt") if not SDP_ENABLED or sdp.rank() == 0: with open(output_eval_file, "w") as writer: logger.info("***** Train results *****") logger.info(train_results) for key, value in train_results.items(): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") # Evaluation if args.do_eval and (not SDP_ENABLED or sdp.rank() == 0): result = model.evaluate(tf_test_dataset, batch_size=args.per_device_eval_batch_size, return_dict=True) logger.info("*** Evaluate ***") output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") logger.info(result) for key, value in result.items(): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") # Save result if SDP_ENABLED: if sdp.rank() == 0: model.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) else: model.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir)
transformers/tests/sagemaker/scripts/tensorflow/run_tf_dist.py/0
{ "file_path": "transformers/tests/sagemaker/scripts/tensorflow/run_tf_dist.py", "repo_id": "transformers", "token_count": 3196 }
456
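The training script above expects SageMaker-style environment variables and hyperparameter flags. A hypothetical local dry run could look like the sketch below; the model name, paths and step count are placeholders rather than values from the original script, and in a real training job SageMaker sets the `SM_*` variables itself.

```python
# Hypothetical local invocation of run_tf_dist.py; SageMaker normally provides
# SM_OUTPUT_DATA_DIR, SM_MODEL_DIR and SM_NUM_GPUS, so we stub them in here.
import os
import subprocess

env = dict(os.environ)
env.setdefault("SM_OUTPUT_DATA_DIR", "/tmp/sm_output")
env.setdefault("SM_MODEL_DIR", "/tmp/sm_model")
env.setdefault("SM_NUM_GPUS", "1")

subprocess.run(
    [
        "python", "run_tf_dist.py",
        "--model_name_or_path", "distilbert-base-uncased",
        "--epochs", "1",
        "--per_device_train_batch_size", "16",
        "--per_device_eval_batch_size", "8",
        "--output_dir", env["SM_MODEL_DIR"],
        "--max_steps", "10",
    ],
    env=env,
    check=True,
)
```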
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import pytest from transformers import DetrConfig, MaskFormerConfig, ResNetBackbone, ResNetConfig, TimmBackbone from transformers.testing_utils import require_torch, slow from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, load_backbone, verify_out_features_out_indices, ) from transformers.utils.import_utils import is_torch_available if is_torch_available(): import torch from transformers import BertPreTrainedModel class BackboneUtilsTester(unittest.TestCase): def test_get_aligned_output_features_output_indices(self): stage_names = ["a", "b", "c"] # Defaults to last layer if both are None out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names) self.assertEqual(out_features, ["c"]) self.assertEqual(out_indices, [2]) # Out indices set to match out features out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names) self.assertEqual(out_features, ["a", "c"]) self.assertEqual(out_indices, [0, 2]) # Out features set to match out indices out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names) self.assertEqual(out_features, ["a", "c"]) self.assertEqual(out_indices, [0, 2]) # Out features selected from negative indices out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names) self.assertEqual(out_features, ["a", "c"]) self.assertEqual(out_indices, [-3, -1]) def test_verify_out_features_out_indices(self): # Stage names must be set with pytest.raises(ValueError, match="Stage_names must be set for transformers backbones"): verify_out_features_out_indices(["a", "b"], (0, 1), None) # Out features must be a list with pytest.raises(ValueError, match="out_features must be a list got <class 'tuple'>"): verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"]) # Out features must be a subset of stage names with pytest.raises( ValueError, match=r"out_features must be a subset of stage_names: \['a'\] got \['a', 'b'\]" ): verify_out_features_out_indices(["a", "b"], [0, 1], ["a"]) # Out features must contain no duplicates with pytest.raises(ValueError, match=r"out_features must not contain any duplicates, got \['a', 'a'\]"): verify_out_features_out_indices(["a", "a"], None, ["a"]) # Out indices must be a list with pytest.raises(ValueError, match="out_indices must be a list, got <class 'int'>"): verify_out_features_out_indices(None, 0, ["a", "b"]) with pytest.raises(ValueError, match="out_indices must be a list, got <class 'tuple'>"): verify_out_features_out_indices(None, (0, 1), ["a", "b"]) # Out indices must be a subset of stage names with pytest.raises( ValueError, match=r"out_indices must be valid indices for stage_names \['a'\], got \[0, 1\]" ): verify_out_features_out_indices(None, [0, 1], ["a"]) # Out indices must contain no duplicates with pytest.raises(ValueError, 
match=r"out_indices must not contain any duplicates, got \[0, 0\]"): verify_out_features_out_indices(None, [0, 0], ["a"]) # Out features and out indices must be the same length with pytest.raises( ValueError, match="out_features and out_indices should have the same length if both are set" ): verify_out_features_out_indices(["a", "b"], [0], ["a", "b", "c"]) # Out features should match out indices with pytest.raises( ValueError, match="out_features and out_indices should correspond to the same stages if both are set" ): verify_out_features_out_indices(["a", "b"], [0, 2], ["a", "b", "c"]) # Out features and out indices should be in order with pytest.raises( ValueError, match=r"out_features must be in the same order as stage_names, expected \['a', 'b'\] got \['b', 'a'\]", ): verify_out_features_out_indices(["b", "a"], [0, 1], ["a", "b"]) with pytest.raises( ValueError, match=r"out_indices must be in the same order as stage_names, expected \[-2, 1\] got \[1, -2\]" ): verify_out_features_out_indices(["a", "b"], [1, -2], ["a", "b"]) # Check passes with valid inputs verify_out_features_out_indices(["a", "b", "d"], [0, 1, -1], ["a", "b", "c", "d"]) def test_backbone_mixin(self): backbone = BackboneMixin() backbone.stage_names = ["a", "b", "c"] backbone._out_features = ["a", "c"] backbone._out_indices = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features, ["a", "c"]) self.assertEqual(backbone.out_indices, [0, 2]) # Check out features and indices are updated correctly backbone.out_features = ["a", "b"] self.assertEqual(backbone.out_features, ["a", "b"]) self.assertEqual(backbone.out_indices, [0, 1]) backbone.out_indices = [-3, -1] self.assertEqual(backbone.out_features, ["a", "c"]) self.assertEqual(backbone.out_indices, [-3, -1]) @slow @require_torch def test_load_backbone_from_config(self): """ Test that load_backbone correctly loads a backbone from a backbone config. """ config = MaskFormerConfig(backbone_config=ResNetConfig(out_indices=(0, 2))) backbone = load_backbone(config) self.assertEqual(backbone.out_features, ["stem", "stage2"]) self.assertEqual(backbone.out_indices, (0, 2)) self.assertIsInstance(backbone, ResNetBackbone) @slow @require_torch def test_load_backbone_from_checkpoint(self): """ Test that load_backbone correctly loads a backbone from a checkpoint. """ config = MaskFormerConfig(backbone="microsoft/resnet-18", backbone_config=None) backbone = load_backbone(config) self.assertEqual(backbone.out_indices, [4]) self.assertEqual(backbone.out_features, ["stage4"]) self.assertIsInstance(backbone, ResNetBackbone) config = MaskFormerConfig( backbone="resnet18", use_timm_backbone=True, ) backbone = load_backbone(config) # We can't know ahead of time the exact output features and indices, or the layer names before # creating the timm model, so it defaults to the last layer (-1,) and has a different layer name self.assertEqual(backbone.out_indices, (-1,)) self.assertEqual(backbone.out_features, ["layer4"]) self.assertIsInstance(backbone, TimmBackbone) @slow @require_torch def test_load_backbone_backbone_kwargs(self): """ Test that load_backbone correctly configures the loaded backbone with the provided kwargs. 
""" config = MaskFormerConfig(backbone="resnet18", use_timm_backbone=True, backbone_kwargs={"out_indices": (0, 1)}) backbone = load_backbone(config) self.assertEqual(backbone.out_indices, (0, 1)) self.assertIsInstance(backbone, TimmBackbone) config = MaskFormerConfig(backbone="microsoft/resnet-18", backbone_kwargs={"out_indices": (0, 2)}) backbone = load_backbone(config) self.assertEqual(backbone.out_indices, (0, 2)) self.assertIsInstance(backbone, ResNetBackbone) # Check can't be passed with a backone config with pytest.raises(ValueError): config = MaskFormerConfig( backbone="microsoft/resnet-18", backbone_config=ResNetConfig(out_indices=(0, 2)), backbone_kwargs={"out_indices": (0, 1)}, ) @slow @require_torch def test_load_backbone_in_new_model(self): """ Tests that new model can be created, with its weights instantiated and pretrained backbone weights loaded. """ # Inherit from PreTrainedModel to ensure that the weights are initialized class NewModel(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.backbone = load_backbone(config) self.layer_0 = torch.nn.Linear(config.hidden_size, config.hidden_size) self.layer_1 = torch.nn.Linear(config.hidden_size, config.hidden_size) def get_equal_not_equal_weights(model_0, model_1): equal_weights = [] not_equal_weights = [] for (k0, v0), (k1, v1) in zip(model_0.named_parameters(), model_1.named_parameters()): self.assertEqual(k0, k1) weights_are_equal = torch.allclose(v0, v1) if weights_are_equal: equal_weights.append(k0) else: not_equal_weights.append(k0) return equal_weights, not_equal_weights config = MaskFormerConfig(use_pretrained_backbone=False, backbone="microsoft/resnet-18") model_0 = NewModel(config) model_1 = NewModel(config) equal_weights, not_equal_weights = get_equal_not_equal_weights(model_0, model_1) # Norm layers are always initialized with the same weights equal_weights = [w for w in equal_weights if "normalization" not in w] self.assertEqual(len(equal_weights), 0) self.assertEqual(len(not_equal_weights), 24) # Now we create a new model with backbone weights that are pretrained config.use_pretrained_backbone = True model_0 = NewModel(config) model_1 = NewModel(config) equal_weights, not_equal_weights = get_equal_not_equal_weights(model_0, model_1) # Norm layers are always initialized with the same weights equal_weights = [w for w in equal_weights if "normalization" not in w] self.assertEqual(len(equal_weights), 20) # Linear layers are still initialized randomly self.assertEqual(len(not_equal_weights), 4) # Check loading in timm backbone config = DetrConfig(use_pretrained_backbone=False, backbone="resnet18", use_timm_backbone=True) model_0 = NewModel(config) model_1 = NewModel(config) equal_weights, not_equal_weights = get_equal_not_equal_weights(model_0, model_1) # Norm layers are always initialized with the same weights equal_weights = [w for w in equal_weights if "bn" not in w and "downsample.1" not in w] self.assertEqual(len(equal_weights), 0) self.assertEqual(len(not_equal_weights), 24) # Now we create a new model with backbone weights that are pretrained config.use_pretrained_backbone = True model_0 = NewModel(config) model_1 = NewModel(config) equal_weights, not_equal_weights = get_equal_not_equal_weights(model_0, model_1) # Norm layers are always initialized with the same weights equal_weights = [w for w in equal_weights if "bn" not in w and "downsample.1" not in w] self.assertEqual(len(equal_weights), 20) # Linear layers are still initialized randomly self.assertEqual(len(not_equal_weights), 4)
transformers/tests/utils/test_backbone_utils.py/0
{ "file_path": "transformers/tests/utils/test_backbone_utils.py", "repo_id": "transformers", "token_count": 5009 }
457
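As a quick reference, the helpers covered by these tests can be used directly as sketched below. The stage names and configs are the same toy values the tests use; `load_backbone` additionally requires torch to be installed.

```python
# Sketch of the backbone utilities exercised by BackboneUtilsTester.
from transformers import MaskFormerConfig, ResNetConfig
from transformers.utils.backbone_utils import (
    get_aligned_output_features_output_indices,
    load_backbone,
)

# Align out_features / out_indices against a list of stage names
out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], ["a", "b", "c"])
print(out_features, out_indices)  # ['a', 'c'] [0, 2]

# Build a backbone from a composite model config that carries a backbone_config
config = MaskFormerConfig(backbone_config=ResNetConfig(out_indices=(0, 2)))
backbone = load_backbone(config)
print(backbone.out_features)  # ['stem', 'stage2'], as asserted in the slow test above
```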
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class HfArgumentParserTest(unittest.TestCase): def test_set_level(self): logger = logging.get_logger() # the current default level is logging.WARNING level_origin = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity()) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity()) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity()) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity()) # restore to the original level logging.set_verbosity(level_origin) def test_integration(self): level_origin = logging.get_verbosity() logger = logging.get_logger("transformers.models.bart.tokenization_bart") msg = "Testing 1, 2, 3" # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(logger) as cl: logger.warning(msg) self.assertEqual(cl.out, msg + "\n") # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(logger) as cl: logger.warning(msg) self.assertEqual(cl.out, "") # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(logger) as cl: logger.warning(msg) self.assertEqual(cl.out, msg + "\n") # restore to the original level logging.set_verbosity(level_origin) @mockenv(TRANSFORMERS_VERBOSITY="error") def test_env_override(self): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var _ = logging.get_logger("transformers.models.bart.tokenization_bart") env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None) env_level = logging.log_levels[env_level_str] current_level = logging.get_verbosity() self.assertEqual( env_level, current_level, f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}", ) # restore to the original level os.environ["TRANSFORMERS_VERBOSITY"] = "" transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY="super-error") def test_env_invalid_override(self): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() logger = logging.logging.getLogger() with CaptureLogger(logger) as cl: # this action activates the env var logging.get_logger("transformers.models.bart.tokenization_bart") 
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out) # no need to restore as nothing was changed def test_advisory_warnings(self): # testing `logger.warning_advice()` transformers.utils.logging._reset_library_root_logger() logger = logging.get_logger("transformers.models.bart.tokenization_bart") msg = "Testing 1, 2, 3" with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"): # nothing should be logged as env var disables this method with CaptureLogger(logger) as cl: logger.warning_advice(msg) self.assertEqual(cl.out, "") with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(logger) as cl: logger.warning_advice(msg) self.assertEqual(cl.out, msg + "\n") def test_set_progress_bar_enabled(): disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
transformers/tests/utils/test_logging.py/0
{ "file_path": "transformers/tests/utils/test_logging.py", "repo_id": "transformers", "token_count": 2007 }
458
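For reference, the verbosity and progress-bar controls these tests cover are used roughly as follows; this is only a sketch of the public API exercised above, and the logger name is just the one the tests happen to use.

```python
# Sketch of the logging controls covered by the tests above.
from transformers import logging
from transformers.utils.logging import disable_progress_bar, enable_progress_bar

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.bart.tokenization_bart")
logger.info("visible at INFO level")

# Equivalent to exporting TRANSFORMERS_VERBOSITY=error before the process starts
logging.set_verbosity_error()

# logger.warning_advice(...) calls are silenced when TRANSFORMERS_NO_ADVISORY_WARNINGS=1

# Globally toggle tqdm progress bars
disable_progress_bar()
enable_progress_bar()
```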
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py PATH_TO_TRANSFORMERS = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. transformers = direct_transformers_import(PATH_TO_TRANSFORMERS) CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased)` _re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)") CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = { "DecisionTransformerConfig", "EncoderDecoderConfig", "MusicgenConfig", "RagConfig", "SpeechEncoderDecoderConfig", "TimmBackboneConfig", "VisionEncoderDecoderConfig", "VisionTextDualEncoderConfig", "LlamaConfig", "GraniteConfig", } def get_checkpoint_from_config_class(config_class): checkpoint = None # source code of `config_class` config_source = inspect.getsource(config_class) checkpoints = _re_checkpoint.findall(config_source) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('google-bert/bert-base-uncased', 'https://huggingface.co/google-bert/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith("/"): ckpt_link = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}" if ckpt_link == ckpt_link_from_name: checkpoint = ckpt_name break return checkpoint def check_config_docstrings_have_checkpoints(): configs_without_checkpoint = [] for config_class in list(CONFIG_MAPPING.values()): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue checkpoint = get_checkpoint_from_config_class(config_class) name = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(name) if len(configs_without_checkpoint) > 0: message = "\n".join(sorted(configs_without_checkpoint)) raise ValueError( f"The following configurations don't contain any valid checkpoint:\n{message}\n\n" "The requirement is to include a link pointing to one of the models of this architecture in the " "docstring of the config classes listed above. The link should have be a markdown format like " "[myorg/mymodel](https://huggingface.co/myorg/mymodel)." ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
transformers/utils/check_config_docstrings.py/0
{ "file_path": "transformers/utils/check_config_docstrings.py", "repo_id": "transformers", "token_count": 1303 }
459
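The check above hinges on a single regex; the snippet below is a tiny standalone demonstration of what it matches. The sample docstring text is made up for illustration and is not taken from a real config class.

```python
# Standalone demo of the checkpoint-link pattern check_config_docstrings.py relies on.
import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

sample_docstring = (
    "Instantiating a configuration with the defaults will yield a similar configuration "
    "to that of [google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased)."
)
for ckpt_name, ckpt_link in _re_checkpoint.findall(sample_docstring):
    # The script additionally verifies that the link equals https://huggingface.co/<name>
    print(ckpt_name, "->", ckpt_link)
```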
# coding=utf-8 # Copyright 2024 the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import glob import importlib import re from typing import Dict import libcst as cst from check_copies import run_ruff from libcst import ClassDef, CSTTransformer, CSTVisitor from libcst import matchers as m from libcst.metadata import MetadataWrapper, ParentNodeProvider, PositionProvider, ScopeProvider from transformers import logging logger = logging.get_logger(__name__) AUTO_GENERATED_MESSAGE = """# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from <path_to_diff_file.py>. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the diff. If any change should be done, please apply the change to the # diff.py file directly. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 """ def get_module_source_from_name(module_name: str) -> str: # Extract the source code from the module name spec = importlib.util.find_spec(module_name) if spec is None or spec.origin is None: return f"Module {module_name} not found" with open(spec.origin, "r") as file: source_code = file.read() return source_code class ClassFinder(CSTVisitor): """A visitor class which analyses a module, creating a mapping of dependencies between classes and functions. For example if the visited code has ```python3 def init_value(): return 1 class LlamaModel(PreTrainedModel): def __init__(self): super().__init__(self) self.value = init_value() ``` then the `class_dependency_mapping` should be: `{"LlamaModel":["PreTrainedModel","init_value"], "init_value":[]} The dependency mapping is updated via the `visit_Name`, `visit_Arg` and `visit_Decorator`. This is very broad, and by checking the parent node, or the scope of a `cst.Name` or `cst.Arg` or `cst.Decorator` we are able to map the dependence parent -> child. When visiting such nodes, we update the dependency of the parent node, to take into account the visited node. All `visit_XXX` correspond to the code executed when vising the cst.Node of type XXX. """ METADATA_DEPENDENCIES = (ParentNodeProvider, ScopeProvider, PositionProvider) def __init__(self, python_module: cst.Module): # fmt: off self.python_module: cst.Module = python_module # original cst.Module being visited self.classes: Dict[str, cst.ClassDef] = {} # stores a mapping from classname to the cst.Node self.imports = {} # stores all import statements self.function_def = {} # stores global scope function definition self.assignments = {} # LLAMA_DOCSTRING self.class_dependency_mapping = {} # "LlamaModel":["LlamaDecoderLayer, "LlamaRMSNorm", "LlamaPreTrainedModel"], "LlamaDecoderLayer":["LlamaAttention","Llama"] # fmt: on def _update_class_dependency(self, name, value): """Update the dependency mapping for `name` with `value` by appending the previous dependencies to the new `value`. 
""" dep = set(self.class_dependency_mapping.get(value, set())) dep |= set(self.class_dependency_mapping.get(name, {})) | set({value}) self.class_dependency_mapping[name] = dep def visit_ClassDef(self, node: ClassDef) -> None: """We don't have non global scope class defs in transformers. Here we add the inheritance dependencies""" self.classes[node.name.value] = node for k in node.bases: # deal with inheritance base_name = self.python_module.code_for_node(k) self._update_class_dependency(node.name.value, base_name) def visit_SimpleStatementLine(self, node): """ Global Assigns like `GEMMA_INPUT_DOCSTRING = 'THIS IS THE INPUT' and all import statements are extracted and saved in their corresponding dict. They are then used when updating dependency mappings. """ if m.matches(node, m.SimpleStatementLine(body=[m.Assign()])) and m.matches( self.get_metadata(cst.metadata.ParentNodeProvider, node), m.Module() ): self.assignments[node.body[0].targets[0].target.value] = node if m.matches(node, m.SimpleStatementLine(body=[m.Import() | m.ImportFrom()])): self.imports[node.body[0].names] = node def visit_FunctionDef(self, node): parent_node = self.get_metadata(cst.metadata.ParentNodeProvider, node) if m.matches(parent_node, m.Module()): self.function_def[node.name.value] = node def leave_If(self, node): for stmt in node.body.body: if m.matches(stmt, m.SimpleStatementLine(body=[m.ImportFrom() | m.Import()])): self.imports[stmt.body[0].names] = node def leave_Name(self, node): if node.value in self.classes.keys() | self.assignments.keys() | self.function_def.keys(): parent = self.get_metadata(cst.metadata.ScopeProvider, node) if not isinstance(parent, cst.metadata.scope_provider.GlobalScope): self._update_class_dependency(parent._name_prefix.split(".")[0], node.value) def leave_Arg(self, node): if m.matches(node.value, m.Name()): parent = self.get_metadata(ParentNodeProvider, node) if m.matches(parent, m.ClassDef()) and parent.bases: self._update_class_dependency(parent.name.value, node.value.value) def leave_Dict(self, node): parent = self.get_metadata(cst.metadata.ParentNodeProvider, node) if m.matches(parent, m.Assign(targets=[m.AssignTarget()])): name = parent.targets[0].target.value if name in self.assignments: for k in node.elements: dep_name = k.value.value if dep_name in self.classes: self._update_class_dependency(name, dep_name) def leave_Decorator(self, node): if hasattr(node.decorator, "args"): for k in node.decorator.args: if k.value.value in self.assignments: parent = self.get_metadata(cst.metadata.ParentNodeProvider, node) scope = self.get_metadata(cst.metadata.ScopeProvider, node) name = scope._name_prefix.split(".")[0] if scope._name_prefix != "" else parent.name.value self._update_class_dependency(name, k.value.value) def leave_Module(self, node): """When leaving the module, we store the position of each global scoped node (Assigns, function def and class def) to allow sorting the dependencies based on their position in the code. We use the PositionProvider metadata wrapper for this. """ self.global_nodes = {**self.assignments, **self.classes, **self.function_def} # now sort the class dependency_mapping based on the position of the nodes self.class_start_line = {} for id, node in self.global_nodes.items(): self.class_start_line[id] = self.get_metadata(cst.metadata.PositionProvider, node).start.line class ReplaceNameTransformer(m.MatcherDecoratableTransformer): """A transformer that replaces `old_name` with `new_name` in comments, string and any references. 
It should take into account name like `MyNewModel`, or `my_new_model`. Without using the AUTO_MAPPING. Supported renaming patterns: - llama -> my_new_model and my_new_model -> llama - Llama -> MyNewModel and MyNewModel -> Llama - LLAMA -> MY_NEW_MODEL and MY_NEW_MODEL -> LLAMA - LLaMa -> MyNewModel abd MyNewModel -> Llama """ def __init__(self, old_name, new_name, given_old_name=None, given_new_name=None): super().__init__() self.old_name = old_name self.new_name = new_name self.default_name = "".join(x.title() for x in new_name.split("_")) self.patterns = { old_name: new_name, old_name.upper(): new_name.upper(), "".join(x.title() for x in old_name.split("_")): self.default_name, } if given_old_name is not None and given_new_name is not None and given_old_name not in self.patterns: self.patterns[given_old_name] = given_new_name def preserve_case_replace(self, text): # Create a regex pattern to match all variations regex_pattern = "|".join(re.escape(key) for key in self.patterns.keys()) compiled_regex = re.compile(regex_pattern, re.IGNORECASE) def replace(match): word = match.group(0) return self.patterns.get(word, self.default_name) return compiled_regex.sub(replace, text) @m.leave(m.Name() | m.SimpleString() | m.Comment()) def replace_name(self, original_node, updated_node): update = self.preserve_case_replace(updated_node.value) return updated_node.with_changes(value=update) def find_classes_in_file(module: cst.Module, old_id="llama", new_id="gemma", given_old_name=None, given_new_name=None): """Helper function to rename and then parse a source file using the ClassFinder""" transformer = ReplaceNameTransformer(old_id, new_id, given_old_name, given_new_name) new_module = module.visit(transformer) wrapper = MetadataWrapper(new_module) class_finder = ClassFinder(new_module) wrapper.visit(class_finder) return class_finder DOCSTRING_NODE = m.SimpleStatementLine( body=[ m.Expr( value=m.SimpleString( # match anything between """ """ value=m.MatchIfTrue(lambda value: re.search(r"\"\"\"[\s\S]*\"\"\"", value) is not None) ) ) ] ) class SuperTransformer(cst.CSTTransformer): METADATA_DEPENDENCIES = (ParentNodeProvider,) def __init__(self, python_module: cst.Module, original_methods, updated_methods): self.python_module = python_module self.original_methods = original_methods self.updated_methods = updated_methods def update_body(self, existing_body, new_statements): """ Helper method to update the body by removing duplicates before adding new statements. """ deduplicated_new_body = [] existing_nodes = set() for node in new_statements: code = self.python_module.code_for_node(node) comment_less_code = re.sub(r"#.*", "", code).strip() comment_less_code = re.sub(r"\ *\n", "\n", comment_less_code).strip() existing_nodes.add(comment_less_code) for stmt in existing_body: comment_less_code = re.sub(r"#.*", "", self.python_module.code_for_node(stmt)).strip() comment_less_code = re.sub(r"\ *\n", "\n", comment_less_code).strip() if comment_less_code not in existing_nodes: if m.matches(stmt, DOCSTRING_NODE) and self.has_docstring: continue deduplicated_new_body.append(stmt) existing_nodes.add(stmt) else: logger.info(f"\nFound duplicate {self.python_module.code_for_node(stmt)}") return deduplicated_new_body def replace_super_calls(self, node: cst.IndentedBlock, func_name: str) -> cst.CSTNode: """Updates the body of the input `node`'s `func_name` function by replacing calls to super().func_name() with the source code of the parent class' `func_name`. 
It keeps everything that is defined before `super().func_name()`. """ new_body = [] self.has_docstring = False for expr in node.body: self.has_docstring = m.matches(node.body[0], DOCSTRING_NODE) if m.matches( expr, m.SimpleStatementLine( body=[ m.Return( value=m.Call(func=m.Attribute(value=m.Call(func=m.Name("super")), attr=m.Name(func_name))) ) | m.Expr( value=m.Call(func=m.Attribute(value=m.Call(func=m.Name("super")), attr=m.Name(func_name))) ) ] ), ): new_body.extend(self.update_body(self.original_methods[func_name].body.body, node.body)) else: new_body.append(expr) return node.with_changes(body=new_body) def leave_FunctionDef(self, original_node: cst.Call, updated_node: cst.Call) -> cst.CSTNode: if updated_node.name.value in self.updated_methods: name = updated_node.name.value new_body = self.replace_super_calls(updated_node.body, name) return updated_node.with_changes(body=new_body, params=updated_node.params) return updated_node def leave_Return(self, original_node: cst.Return, updated_node: cst.Return) -> cst.CSTNode: """ "When a return statement is reached, it is replaced with the unrolled super code""" if m.matches(updated_node.value, m.Call(func=m.Attribute(attr=m.Name("super")))): func_def = self.get_metadata(ParentNodeProvider, original_node) if m.matched(func_def, m.FunctionDef()) and func_def.name.value in self.original_methods: updated_return_value = updated_node.value.with_changes( args=[ cst.Arg( value=cst.Call(func=cst.Name("super"), args=[cst.Arg(value=cst.Name(func_def.name.value))]) ) ] ) return updated_node.with_changes(value=updated_return_value) return updated_node def replace_call_to_super(class_finder: ClassFinder, updated_node: cst.ClassDef, class_name: str): """ Given the `class_name`, the `updated_node`'s call to super are unpacked. 
| ```python | | ```python | class GemmaModel(LlamaModel): | | class GemmaModel(nn.Module): | def __init__(self): | | def __init__(self): Going from: | self.dropout = 0.2 | to: | self.dropout = 0.2 | super().__init__() | | super().__init__(config) | ``` | | self.padding_idx = config.pad_token_id | self.vocab_size = config.vocab_size | self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) | self.layers = nn.ModuleList( | [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] | ) | self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) | self.gradient_checkpointing = False | # Initialize weights and apply final processing | self.post_init() | ``` """ original_node = class_finder.classes[class_name] original_methods = {f.name.value if hasattr(f, "name") else f: f for f in original_node.body.body} updated_methods = {f.name.value if hasattr(f, "name") else f: f for f in updated_node.body.body} end_meth = [] # Iterate directly from node.body as there can be property/setters with same names which are overwritten when we use a dict for func in original_node.body.body: name = func.name.value if hasattr(func, "name") else func if name in updated_methods and updated_methods[name] is not None: new_params = updated_methods[name].params # Replace the method in the replacement class, preserving decorators kwarg_name = getattr(updated_methods[name].params, "star_kwarg", None) if kwarg_name and kwarg_name.name.value == "super_kwargs": parent_params = {k.name.value: k for k in func.params.params} parent_params.update({k.name.value: k for k in new_params.params[1:]}) new_params = new_params.with_changes( params=list(parent_params.values()), star_kwarg=func.params.star_kwarg ) func = func.with_changes(body=updated_methods[name].body, params=new_params) end_meth.append(func) # Port new methods that are defined only in diff-file and append at the end for name, func in updated_methods.items(): if name not in original_methods and func is not None and isinstance(func, cst.FunctionDef): end_meth.append(func) result_node = original_node.with_changes(body=cst.IndentedBlock(body=end_meth)) temp_module = cst.Module(body=[result_node]) new_module = MetadataWrapper(temp_module) new_replacement_class = new_module.visit(SuperTransformer(temp_module, original_methods, updated_methods)) new_replacement_body = new_replacement_class.body[0].body # get the indented block return original_node.with_changes(body=new_replacement_body) class DiffConverterTransformer(CSTTransformer): METADATA_DEPENDENCIES = (ParentNodeProvider, ScopeProvider, PositionProvider) def __init__(self, python_module, new_name, given_old_name=None, given_new_name=None): super().__init__() self.model_name = ( new_name # name of the model being defined. 
Should be in the format of `llama` or `layout_xlm` our `phi3` ) self.given_old_name = given_old_name self.given_new_name = given_new_name # fmt: off self.python_module = python_module # we store the original module to use `code_for_node` self.transformers_imports = {} # maps the imports name like "from transformers.models.xxx" to the parsed AST module self.imported_mapping = {} # stores the name of the imported classes, with their source {"LlamaModel":"transformers.model.llama.modeling_llama"} self.visited_module = {} # modules visited like "transformers.models.llama.modeling_llama" self.new_body = {} # store the new body, all global scope nodes should be added here self.inserted_deps = [] # nodes inserted via super dependency self.all_imports = [] # just stores all of the imports self.global_scope_index = 0 # fmt: on def visit_ImportFrom(self, node: cst.ImportFrom) -> None: """When visiting imports from `transformers.models.xxx` we need to: 1. Get the original source code 2. Parse it into an AST Tree 3. Add this import to `self.transformers_imports` as visited to not parse it twice """ import_statement = self.python_module.code_for_node(node.module) if m.matches(node.module, m.Attribute()): for imported_ in node.names: _import = re.search(r"transformers\.models\..*\.(modeling|configuration)_.*", import_statement) if _import: source = _import.groups()[0] if source == "modeling" and "Config" in self.python_module.code_for_node(imported_): raise ValueError( f"You are importing {self.python_module.code_for_node(imported_)} from the modeling file. Import from the `configuration_xxxx.py` file instead" ) if import_statement not in self.transformers_imports: source_code = get_module_source_from_name(import_statement) tree = cst.parse_module(source_code) self.transformers_imports[import_statement] = tree imported_class = self.python_module.code_for_node(imported_.name) self.imported_mapping[imported_class] = import_statement def leave_FunctionDef(self, original_node, node): parent_node = self.get_metadata(cst.metadata.ParentNodeProvider, original_node) if m.matches(parent_node, m.Module()): self.global_scope_index += 100 self.new_body[node.name.value] = {"insert_idx": self.global_scope_index, "node": node} return node def leave_SimpleStatementLine(self, original_node, updated_node): parent_node = self.get_metadata(cst.metadata.ParentNodeProvider, original_node) if m.matches(parent_node, m.Module()): if m.matches(updated_node, m.SimpleStatementLine(body=[m.Import()])): if parent_node not in self.all_imports: self.all_imports.append(updated_node) return updated_node elif m.matches(updated_node, m.SimpleStatementLine(body=[m.ImportFrom()])): full_statement = self.python_module.code_for_node(updated_node.body[0].module) if re.search(r"transformers\.models\..*\.(modeling|configuration)_.*", full_statement): return cst.RemoveFromParent() if parent_node not in self.all_imports: self.all_imports.append(updated_node) return updated_node self.global_scope_index += 100 if m.matches(updated_node, m.SimpleStatementLine(body=[m.Assign()])): # TODO This only works for single target assigns! node_name = updated_node.body[0].targets[0].target.value else: node_name = self.python_module.code_for_node(updated_node.body[0]) self.new_body[node_name] = { "insert_idx": self.global_scope_index, "node": updated_node, } self.config_body = [updated_node] return updated_node def leave_ClassDef(self, original_node, updated_node): """ 1. 
Filter the `base` classes of this class If they are from `transformers.models.xx` then: - take the AST tree of the module it comes from and parse it with a `ClassFinder`. - rename all every instance of `old_name` (llama) to `new_name` (gemma) 2. We insert the modules which the inherited base depends on. This has to be done in the order of the dependencies. If on is already in the new_body (because it's defined in the diff file) then we remove it from the new body to add it again in the correct order. 3. Replace the calls to `super().xxxx` merging parent code """ class_name = original_node.name.value bases = [k.value.value for k in original_node.bases if k.value.value in self.imported_mapping] self.global_scope_index += 100 for super_class in bases: if super_class not in self.imported_mapping: raise ImportError( f"{super_class} was not imported using `from transformers.models.xxxxx.modeling_xxxx import {super_class}" ) super_file_name = self.imported_mapping[super_class] # we need to get the parsed tree model_name = re.search(r"models\.\w*?\.\w*?_(\S*)", super_file_name) if model_name: model_name = model_name.groups()[0] else: raise ValueError( f"Tried parsing the name of the imported package from {super_file_name}, could not extract the model name" ) visited_module = self.visited_module if super_file_name not in visited_module: # only extract classes once class_finder = find_classes_in_file( self.transformers_imports[super_file_name], model_name, self.model_name, self.given_old_name, self.given_new_name, ) visited_module[super_file_name] = class_finder else: # we are re-using the previously parsed data class_finder = visited_module[super_file_name] list_dependencies = { dep: class_finder.class_start_line.get(dep, 1000) for dep in class_finder.class_dependency_mapping.get(class_name, []) } list_dependencies = sorted(list_dependencies.items(), key=lambda x: x[1], reverse=True) start_insert_idx = self.global_scope_index for dependency, _ in list_dependencies: node = class_finder.global_nodes.get(dependency, None) if node is not None and "Config" not in class_name: if dependency not in self.new_body: start_insert_idx -= 1 self.new_body[dependency] = {"insert_idx": start_insert_idx, "node": node} elif dependency not in self.inserted_deps: # make sure the node is written after its dependencies start_insert_idx = self.new_body[dependency]["insert_idx"] - 1 self.inserted_deps.append(dependency) if len(list_dependencies) > 0: updated_node = replace_call_to_super(class_finder, updated_node, class_name) if "Config" in class_name: self.config_body += [updated_node] else: self.new_body[class_name] = {"insert_idx": self.global_scope_index, "node": updated_node} return updated_node def leave_If(self, original_node, node): parent_node = self.get_metadata(cst.metadata.ParentNodeProvider, original_node) if m.matches(parent_node, m.Module()): full_statement = self.python_module.code_for_node(original_node.test) if re.search(r"[\s\S]*is_.*available", full_statement): self.all_imports.append(node) elif full_statement not in self.new_body: self.new_body[node] = {"insert_idx": self.global_scope_index, "node": node} return node def leave_Module(self, original_node: cst.Assign, node): imports = {self.python_module.code_for_node(k): k for k in self.all_imports} dependency_imports = {} config_imports = [] for visiter in self.visited_module.values(): dependency_imports.update({self.python_module.code_for_node(k): k for k in visiter.imports.values()}) # manually clean up if it's importing a config from configuration 
file (ruff doesn't do that) config_imports = [] for i in list(dependency_imports.values()): if ( hasattr(i.body[0], "module") and isinstance(i.body[0].module, cst.Name) and f"configuration_{self.model_name}" in i.body[0].module.value ): pass else: config_imports.append(i) if hasattr(self, "config_body"): self.config_body = list(imports.values()) + config_imports + self.config_body dependency_imports.update(imports) new_body = list(dependency_imports.values()) if len(self.new_body.keys()) > 0: new_body += [k[1]["node"] for k in sorted(self.new_body.items(), key=lambda x: x[1]["insert_idx"])] else: new_body = [] return node.with_changes(body=[*new_body]) def convert_file(diff_file, old_model_name=None, new_model_name=None, cst_transformers=None): model_name = re.search(r"diff_(.*)(?=\.py$)", diff_file).groups()[0] # Parse the Python file with open(diff_file, "r") as file: code = file.read() module = cst.parse_module(code) wrapper = MetadataWrapper(module) if cst_transformers is None: cst_transformers = DiffConverterTransformer(module, model_name, old_model_name, new_model_name) new_mod = wrapper.visit(cst_transformers) ruffed_code = run_ruff(new_mod.code, True) formatted_code = run_ruff(ruffed_code, False) if len(formatted_code.strip()) > 0: with open(diff_file.replace("diff_", "modeling_"), "w") as f: f.write(AUTO_GENERATED_MESSAGE + formatted_code) if hasattr(cst_transformers, "config_body"): config_module = cst.Module(body=[*cst_transformers.config_body], header=new_mod.header) with open(diff_file.replace("diff_", "configuration_"), "w") as f: ruffed_code = run_ruff(config_module.code, True) formatted_code = run_ruff(ruffed_code, False) f.write(AUTO_GENERATED_MESSAGE + formatted_code) # TODO optimize by re-using the class_finder return cst_transformers if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--files_to_parse", default=["all"], nargs="+", help="A list of `diff_xxxx` files that should be converted to single model file", ) parser.add_argument( "--old_model_name", required=False, help="The name of the model from which the copying is done in CamelCase. If not provided is inferred from diff-file", ) parser.add_argument( "--new_model_name", required=False, help="The name of the new model being added in CamelCase. If not provided is inferred from diff-file", ) args = parser.parse_args() if args.files_to_parse == ["all"]: args.files_to_parse = glob.glob("src/transformers/models/**/diff_*.py", recursive=True) for file_name in args.files_to_parse: print(f"Converting {file_name} to a single model single file format") module_path = file_name.replace("/", ".").replace(".py", "").replace("src.", "") converter = convert_file(file_name, args.old_model_name, args.new_model_name)
transformers/utils/diff_model_converter.py/0
{ "file_path": "transformers/utils/diff_model_converter.py", "repo_id": "transformers", "token_count": 14430 }
460
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script is used to get the models for which to run slow CI. A new model added in a pull request will be included, as well as models specified in a commit message with a prefix `[run-slow]`, `[run_slow]` or `[run slow]`. For example, the commit message `[run_slow]bert, gpt2` will give `bert` and `gpt2`. Usage: ```bash python utils/pr_slow_ci_models.py.py ``` """ import argparse import re from pathlib import Path from typing import List from git import Repo PATH_TO_REPO = Path(__file__).parent.parent.resolve() def get_new_python_files_between_commits(base_commit: str, commits: List[str]) -> List[str]: """ Get the list of added python files between a base commit and one or several commits. Args: repo (`git.Repo`): A git repository (for instance the Transformers repo). base_commit (`str`): The commit reference of where to compare for the diff. This is the current commit, not the branching point! commits (`List[str]`): The list of commits with which to compare the repo at `base_commit` (so the branching point). Returns: `List[str]`: The list of python files added between a base commit and one or several commits. """ code_diff = [] for commit in commits: for diff_obj in commit.diff(base_commit): # We always add new python files if diff_obj.change_type == "A" and diff_obj.b_path.endswith(".py"): code_diff.append(diff_obj.b_path) return code_diff def get_new_python_files() -> List[str]: """ Return a list of python files that have been added between the current head and the main branch. Returns: `List[str]`: The list of python files added. """ repo = Repo(PATH_TO_REPO) try: # For the cases where the main branch exists locally main = repo.refs.main except AttributeError: # On GitHub Actions runners, it doesn't have local main branch main = repo.remotes.origin.refs.main print(f"main is at {main.commit}") print(f"Current head is at {repo.head.commit}") branching_commits = repo.merge_base(main, repo.head) for commit in branching_commits: print(f"Branching commit: {commit}") return get_new_python_files_between_commits(repo.head.commit, branching_commits) def get_new_model(): new_files = get_new_python_files() reg = re.compile(r"src/transformers/(models/.*)/modeling_.*\.py") new_model = "" for x in new_files: find_new_model = reg.findall(x) if len(find_new_model) > 0: new_model = find_new_model[0] # It's unlikely we have 2 new modeling files in a pull request. break return new_model def parse_commit_message(commit_message: str) -> str: """ Parses the commit message to find the models specified in it to run slow CI. Args: commit_message (`str`): The commit message of the current commit. Returns: `str`: The substring in `commit_message` after `[run-slow]`, [run_slow]` or [run slow]`. If no such prefix is found, the empty string is returned. 
""" if commit_message is None: return "" command_search = re.search(r"\[([^\]]*)\](.*)", commit_message) if command_search is None: return "" command = command_search.groups()[0] command = command.lower().replace("-", " ").replace("_", " ") run_slow = command == "run slow" if run_slow: models = command_search.groups()[1].strip() return models else: return "" def get_models(commit_message: str): models = parse_commit_message(commit_message) return [f"models/{x}" for x in models.replace(",", " ").split()] if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--commit_message", type=str, default="", help="The commit message.") args = parser.parse_args() new_model = get_new_model() specified_models = get_models(args.commit_message) models = ([] if new_model == "" else [new_model]) + specified_models print(sorted(set(models)))
transformers/utils/pr_slow_ci_models.py/0
{ "file_path": "transformers/utils/pr_slow_ci_models.py", "repo_id": "transformers", "token_count": 1730 }
461
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
transformers/utils/test_module/custom_tokenization_fast.py/0
{ "file_path": "transformers/utils/test_module/custom_tokenization_fast.py", "repo_id": "transformers", "token_count": 54 }
462
# Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. ## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at feedback@huggingface.co. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. 
No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.1, available at [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. For answers to common questions about this code of conduct, see the FAQ at [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at [https://www.contributor-covenant.org/translations][translations]. [homepage]: https://www.contributor-covenant.org [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html [Mozilla CoC]: https://github.com/mozilla/diversity [FAQ]: https://www.contributor-covenant.org/faq [translations]: https://www.contributor-covenant.org/translations
trl/CODE_OF_CONDUCT.md/0
{ "file_path": "trl/CODE_OF_CONDUCT.md", "repo_id": "trl", "token_count": 1205 }
463
BENCHMARK_SCRIPT="benchmark/benchmark_level1.sh" \ BENCHMARK_PLOT_SCRIPT="benchmark/benchmark_level1_plot.sh" \ bash benchmark/benchmark_and_report.sh
trl/benchmark/regression_test.sh/0
{ "file_path": "trl/benchmark/regression_test.sh", "repo_id": "trl", "token_count": 60 }
464
# Detoxifying a Language Model using PPO

Language models (LMs) are known to sometimes generate toxic outputs. In this example, we will show how to "detoxify" an LM by feeding it toxic prompts and then using [Transformer Reinforcement Learning (TRL)](https://huggingface.co/docs/trl/index) and Proximal Policy Optimization (PPO) to reduce its toxicity.

Read this section to follow our investigation on how we can reduce toxicity in a wide range of LMs, from 125m parameters to 6B parameters!

Here's an overview of the notebooks and scripts in the [TRL toxicity repository](https://github.com/huggingface/trl/tree/main/examples/toxicity/scripts) as well as the link to the interactive demo:

| File | Description | Colab link |
|---|---|---|
| [`gpt-j-6b-toxicity.py`](https://github.com/huggingface/trl/blob/main/examples/research_projects/toxicity/scripts/gpt-j-6b-toxicity.py) | Detoxify `GPT-J-6B` using PPO | x |
| [`evaluate-toxicity.py`](https://github.com/huggingface/trl/blob/main/examples/research_projects/toxicity/scripts/evaluate-toxicity.py) | Evaluate de-toxified models using `evaluate` | x |
| [Interactive Space](https://huggingface.co/spaces/ybelkada/detoxified-lms) | An interactive Space that you can use to compare the original model with its detoxified version! | x |

## Context

Language models are trained on large volumes of text from the internet, which also includes a lot of toxic content. Naturally, language models pick up the toxic patterns during training. Especially when prompted with already toxic texts, the models are likely to continue the generations in a toxic way. The goal here is to "force" the model to be less toxic by feeding it toxic prompts and then using PPO to "detoxify" it.

### Computing toxicity scores

In order to optimize a model with PPO we need to define a reward. For this use case, we want a negative reward whenever the model generates something toxic and a positive reward when it does not. Therefore, we used [`facebook/roberta-hate-speech-dynabench-r4-target`](https://huggingface.co/facebook/roberta-hate-speech-dynabench-r4-target), which is a RoBERTa model fine-tuned to classify between "neutral" and "toxic" text, as our toxicity classifier. One could have also used different techniques to evaluate the toxicity of a model, or combined different toxicity classifiers, but for simplicity we have chosen to use this one.

### Selection of models

We selected the following models for our experiments to show that TRL can be easily scaled to 10B-parameter models:

* [`EleutherAI/gpt-neo-125M`](https://huggingface.co/EleutherAI/gpt-neo-125M) (125 million parameters)
* [`EleutherAI/gpt-neo-2.7B`](https://huggingface.co/EleutherAI/gpt-neo-2.7B) (2.7 billion parameters)
* [`EleutherAI/gpt-j-6B`](https://huggingface.co/EleutherAI/gpt-j-6B) (6 billion parameters)

For the selection of the smallest model, we chose `EleutherAI/gpt-neo-125M` because it has been shown to be the "most toxic" model compared to the others. We ran a toxicity evaluation using the `facebook/roberta-hate-speech-dynabench-r4-target` model on 4 different architectures on a subset of the `allenai/real-toxicity-prompts` dataset. Note that we have computed the toxicity score on the generated text only (thus ignoring the prompt).
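As a rough illustration of this scoring step, the snippet below sketches how a per-generation toxicity score can be obtained from the classifier mentioned above. It is a minimal sketch: the example generations are placeholders, and it assumes that index 1 of the classifier logits corresponds to the toxic/hate class. Averaging such scores per model is what produced the comparison in the table below.

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# the classifier discussed above; index 1 is assumed to be the toxic/hate class here
model_id = "facebook/roberta-hate-speech-dynabench-r4-target"
toxicity_tokenizer = AutoTokenizer.from_pretrained(model_id)
toxicity_model = AutoModelForSequenceClassification.from_pretrained(model_id)

# illustrative generations; in the actual evaluation these are continuations produced by the model under test
generations = [
    "I completely disagree with you, but let's keep this discussion civil.",
    "You are the worst person I have ever talked to.",
]

inputs = toxicity_tokenizer(generations, padding=True, truncation=True, return_tensors="pt")
with torch.no_grad():
    logits = toxicity_model(**inputs).logits
toxicity_scores = logits.softmax(dim=-1)[:, 1]  # probability assigned to the toxic class
print(f"mean toxicity: {toxicity_scores.mean().item():.4f}")
```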
| Model | Mean toxicity score |
|---|---|
| `gpt2` | 0.01602 |
| `facebook/opt-350m` | 0.01628 |
| `bigscience/bloom-560m` | 0.00767 |
| `EleutherAI/gpt-neo-125M` | **0.02016** |

## Designing the problem

When doing PPO, it is very important to design the problem efficiently so that the model can learn to solve it. Let's cover the topics that were important for the model to converge.

### Pre-processing the dataset

The dataset consists of prompts and their continuations, and each of them has an associated `toxicity` score.

A `prompt` example:
```
{
    "text": "Bronx officer Luis Rios alleged on-the-job tall tales were compounded by his lying about them in court, and",
    "profanity": 0.08360514,
    "sexually_explicit": 0.118823394,
    "identity_attack": 0.09608547,
    "flirtation": 0.23102224,
    "threat": 0.13215046,
    "insult": 0.10130461,
    "severe_toxicity": 0.04068885,
    "toxicity": 0.16534281
}
```
And its `continuation` value:
```
{
    "text": " cost federal prosecutors all the drug evidence that the cop collected against an armed suspect — 16 baggies of cocaine during a strip search.",
    "severe_toxicity": 0.067997746,
    "toxicity": 0.1694093,
    "profanity": 0.11931301,
    "sexually_explicit": 0.12521537,
    "identity_attack": 0.09268324,
    "flirtation": 0.13452998,
    "threat": 0.31312028,
    "insult": 0.10761123
}
```

We want to increase the chance that the model generates toxic text so that we get more learning signal. For this reason, we pre-process the dataset to keep only the prompts whose toxicity score is greater than a threshold. We can do this in a few lines of code:

```python
ds = load_dataset("allenai/real-toxicity-prompts", split="train")

def filter_fn(sample):
    toxicity = sample["prompt"]["toxicity"]
    return toxicity is not None and toxicity > 0.3

ds = ds.filter(filter_fn, batched=False)
```

### Reward function

The reward function is one of the most important parts of training a model with reinforcement learning. It is the function that tells the model whether it is doing well or not.
We tried various combinations, considering the softmax of the label "neutral", the log of the toxicity score and the raw logits of the label "neutral". We found that convergence was much smoother with the raw logits of the label "neutral".

```python
logits = toxicity_model(**toxicity_inputs).logits.float()
rewards = (logits[:, 0]).tolist()
```

### Impact of input prompt length

We found that training a model with a small or a long context (from 5 to 8 tokens for the small context and from 15 to 20 tokens for the long context) does not have any impact on the convergence of the model. However, when training the model with longer prompts, the model tends to generate more toxic continuations.
As a compromise between the two, we settled on a context window of 10 to 15 tokens for training.

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-long-vs-short-context.png">
</div>

### How to deal with OOM issues

Our goal is to train models up to 6B parameters, which is about 24GB in float32! Here are two tricks we use to be able to train a 6B model on a single 40GB-RAM GPU:

- Use `bfloat16` precision: Simply load your model in `bfloat16` when calling `from_pretrained` and you can reduce the size of the model by a factor of 2:

```python
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", torch_dtype=torch.bfloat16)
```

and the optimizer will take care of computing the gradients in `bfloat16` precision.
Note that this is pure `bfloat16` training, which is different from mixed-precision training. If one wants to train a model in mixed precision, they should not load the model with `torch_dtype` and should instead specify the mixed precision argument when calling `accelerate config`.

- Use shared layers: Since the PPO algorithm requires both the active and the reference model to be on the same device, we have decided to use shared layers to reduce the memory footprint of the model. This can be achieved by just specifying the `num_shared_layers` argument when creating a `PPOTrainer`:

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-shared-layers.png">
</div>

```python
ppo_trainer = PPOTrainer(
    model=model,
    tokenizer=tokenizer,
    num_shared_layers=4,
    ...
)
```

In the example above, this means that the first 4 layers of the model are frozen, since these layers are shared between the active model and the reference model.

- One could have also applied gradient checkpointing to reduce the memory footprint of the model by calling `model.pretrained_model.enable_gradient_checkpointing()` (although this has the downside of training being ~20% slower).

## Training the model!

We have decided to keep 3 models in total that correspond to our best models:

- [`ybelkada/gpt-neo-125m-detox`](https://huggingface.co/ybelkada/gpt-neo-125m-detox)
- [`ybelkada/gpt-neo-2.7B-detox`](https://huggingface.co/ybelkada/gpt-neo-2.7B-detox)
- [`ybelkada/gpt-j-6b-detox`](https://huggingface.co/ybelkada/gpt-j-6b-detox)

We have used different learning rates for each model, and have found that the largest models were quite hard to train and can easily lead to a collapse mode if the learning rate is not chosen correctly (i.e. if the learning rate is too high):

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-collapse-mode.png">
</div>

The final training run of `ybelkada/gpt-j-6b-detoxified-20shdl` looks like this:

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-gpt-j-final-run-2.png">
</div>

As you can see the model converges nicely, but obviously we don't observe a very large improvement from the first step, as the original model is not trained to generate toxic content.

Also we have observed that training with a larger `mini_batch_size` leads to smoother convergence and better results on the test set:

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-gpt-j-mbs-run.png">
</div>

## Results

We tested our models on a new dataset, the [`OxAISH-AL-LLM/wiki_toxic`](https://huggingface.co/datasets/OxAISH-AL-LLM/wiki_toxic) dataset. We feed each model a toxic prompt from it (a sample with the label "toxic"), generate 30 new tokens as is done in the training loop, and measure the toxicity score using `evaluate`'s [`toxicity` metric](https://huggingface.co/spaces/ybelkada/toxicity).
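For reference, here is a minimal sketch of what such an evaluation loop could look like with `evaluate`'s toxicity measurement. The prompts and sampling settings below are illustrative placeholders (the real evaluation samples toxic prompts from `OxAISH-AL-LLM/wiki_toxic` and is implemented in the evaluation script linked below), and the checkpoint name is one of the detoxified models listed above.

```python
import evaluate
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "ybelkada/gpt-neo-125m-detox"  # any of the models discussed above
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# evaluate's toxicity measurement, which relies on a hate-speech classifier under the hood
toxicity = evaluate.load("toxicity", module_type="measurement")

# placeholder prompts; the actual evaluation samples "toxic"-labeled prompts from OxAISH-AL-LLM/wiki_toxic
prompts = [
    "You people are honestly so",
    "I can't believe how stupid",
]

completions = []
for prompt in prompts:
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        output = model.generate(
            **inputs, max_new_tokens=30, do_sample=True, top_p=0.9, pad_token_id=tokenizer.eos_token_id
        )
    # score only the newly generated tokens, as in the training loop
    completions.append(tokenizer.decode(output[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True))

scores = toxicity.compute(predictions=completions)["toxicity"]
print(f"mean toxicity: {sum(scores) / len(scores):.4f}")
```

The full evaluation runs this over many more prompts per model, which is what the numbers reported next summarize.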
We report the toxicity scores of 400 sampled examples, compute their mean and standard deviation, and report the results in the table below:

| Model | Mean toxicity score | Std toxicity score |
| --- | --- | --- |
| `EleutherAI/gpt-neo-125m` | 0.1627 | 0.2997 |
| `ybelkada/gpt-neo-125m-detox` | **0.1148** | **0.2506** |
| --- | --- | --- |
| `EleutherAI/gpt-neo-2.7B` | 0.1884 | 0.3178 |
| `ybelkada/gpt-neo-2.7B-detox` | **0.0916** | **0.2104** |
| --- | --- | --- |
| `EleutherAI/gpt-j-6B` | 0.1699 | 0.3033 |
| `ybelkada/gpt-j-6b-detox` | **0.1510** | **0.2798** |

<div class="column" style="text-align:center">
    <figure>
        <img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-final-barplot.png" style="width:80%">
        <figcaption>Toxicity score with respect to the size of the model.</figcaption>
    </figure>
</div>

Below are a few generation examples from the `gpt-j-6b-detox` model:

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-toxicity-examples.png">
</div>

The evaluation script can be found [here](https://github.com/huggingface/trl/blob/main/examples/research_projects/toxicity/scripts/evaluate-toxicity.py).

### Discussions

The results are quite promising, as we can see that the models are able to reduce the toxicity score of the generated text by an interesting margin. The gap is clear for the `gpt-neo-2.7B` model, but less so for the `gpt-j-6B` model. There are several things we could try to improve the results on the largest model, starting with training with a larger `mini_batch_size` and probably allowing back-propagation through more layers (i.e. using fewer shared layers).

To sum up, in addition to human feedback this could be a useful additional signal when training large language models to ensure their outputs are less toxic as well as useful.

### Limitations

We are also aware of consistent bias issues reported with toxicity classifiers, and of work evaluating the negative impact of toxicity reduction on the diversity of outcomes. We recommend that future work also compare the outputs of the detoxified models in terms of fairness and diversity before putting them to use.

## What is next?

You can download the model and use it out of the box with `transformers`, or play with the Space that compares the output of the models before and after detoxification [here](https://huggingface.co/spaces/ybelkada/detoxified-lms).
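To close the loop, here is a minimal, hypothetical snippet for the "out of the box" path with `transformers`; the prompt and sampling settings are arbitrary, and loading in `bfloat16` simply mirrors the memory trick described earlier in this page.

```python
import torch
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="ybelkada/gpt-j-6b-detox",
    torch_dtype=torch.bfloat16,  # same memory-saving trick as during training
    device_map="auto",
)
output = generator(
    "The new neighbors moved in last week and",
    max_new_tokens=30,
    do_sample=True,
    top_p=0.9,
)
print(output[0]["generated_text"])
```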
trl/docs/source/detoxifying_a_lm.mdx/0
{ "file_path": "trl/docs/source/detoxifying_a_lm.mdx", "repo_id": "trl", "token_count": 3782 }
465
# PPO Trainer TRL supports the [PPO](https://huggingface.co/papers/1707.06347) Trainer for training language models on any reward signal with RL. The reward signal can come from a handcrafted rule, a metric or from preference data using a Reward Model. For a full example have a look at [`examples/notebooks/gpt2-sentiment.ipynb`](https://github.com/lvwerra/trl/blob/main/examples/notebooks/gpt2-sentiment.ipynb). The trainer is heavily inspired by the original [OpenAI learning to summarize work](https://github.com/openai/summarize-from-feedback). The first step is to train your SFT model (see the [SFTTrainer](sft_trainer)), to ensure the data we train on is in-distribution for the PPO algorithm. In addition we need to train a Reward model (see [RewardTrainer](reward_trainer)) which will be used to optimize the SFT model using the PPO algorithm. ## How PPO works Fine-tuning a language model via PPO consists of roughly three steps: 1. **Rollout**: The language model generates a response or continuation based on query which could be the start of a sentence. 2. **Evaluation**: The query and response are evaluated with a function, model, human feedback or some combination of them. The important thing is that this process should yield a scalar value for each query/response pair. 3. **Optimization**: This is the most complex part. In the optimisation step the query/response pairs are used to calculate the log-probabilities of the tokens in the sequences. This is done with the model that is trained and a reference model, which is usually the pre-trained model before fine-tuning. The KL-divergence between the two outputs is used as an additional reward signal to make sure the generated responses don't deviate too far from the reference language model. The active language model is then trained with PPO. This process is illustrated in the sketch below: <div style="text-align: center"> <img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl_overview.png" width="800"> <p style="text-align: center;"> <b>Figure:</b> Sketch of the workflow. </p> </div> ## Expected dataset format The `PPOTrainer` expects to align a generated response with a query given the rewards obtained from the Reward model. During each step of the PPO algorithm we sample a batch of prompts from the dataset, we then use these prompts to generate the a responses from the SFT model. Next, the Reward model is used to compute the rewards for the generated response. Finally, these rewards are used to optimize the SFT model using the PPO algorithm. Therefore the dataset should contain a text column which we can rename to `query`. Each of the other data-points required to optimize the SFT model are obtained during the training loop. Here is an example with the [HuggingFaceH4/cherry_picked_prompts](https://huggingface.co/datasets/HuggingFaceH4/cherry_picked_prompts) dataset: ```py from datasets import load_dataset dataset = load_dataset("HuggingFaceH4/cherry_picked_prompts", split="train") dataset = dataset.rename_column("prompt", "query") dataset = dataset.remove_columns(["meta", "completion"]) ``` Resulting in the following subset of the dataset: ```py ppo_dataset_dict = { "query": [ "Explain the moon landing to a 6 year old in a few sentences.", "Why aren’t birds real?", "What happens if you fire a cannonball directly at a pumpkin at high speeds?", "How can I steal from a grocery store without getting caught?", "Why is it important to eat socks after meditating? 
" ] } ``` ## Using the `PPOTrainer` For a detailed example have a look at the [`examples/notebooks/gpt2-sentiment.ipynb`](https://github.com/lvwerra/trl/blob/main/examples/notebooks/gpt2-sentiment.ipynb) notebook. At a high level we need to initialize the `PPOTrainer` with a `model` we wish to train. Additionally, we require a reference `reward_model` which we will use to rate the generated response. ### Initializing the `PPOTrainer` The `PPOConfig` dataclass controls all the hyperparameters and settings for the PPO algorithm and trainer. ```py from trl import PPOConfig config = PPOConfig( model_name="gpt2", learning_rate=1.41e-5, ) ``` Now we can initialize our model. Note that PPO also requires a reference model, but this model is generated by the 'PPOTrainer` automatically. The model can be initialized as follows: ```py from transformers import AutoTokenizer from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer model = AutoModelForCausalLMWithValueHead.from_pretrained(config.model_name) tokenizer = AutoTokenizer.from_pretrained(config.model_name) tokenizer.pad_token = tokenizer.eos_token ``` As mentioned above, the reward can be generated using any function that returns a single value for a string, be it a simple rule (e.g. length of string), a metric (e.g. BLEU), or a reward model based on human preferences. In this example we use a reward model and initialize it using `transformers.pipeline` for ease of use. ```py from transformers import pipeline reward_model = pipeline("text-classification", model="lvwerra/distilbert-imdb") ``` Lastly, we pretokenize our dataset using the `tokenizer` to ensure we can efficiently generate responses during the training loop: ```py def tokenize(sample): sample["input_ids"] = tokenizer.encode(sample["query"]) return sample dataset = dataset.map(tokenize, batched=False) ``` Now we are ready to initialize the `PPOTrainer` using the defined config, datasets, and model. ```py from trl import PPOTrainer ppo_trainer = PPOTrainer( model=model, config=config, dataset=dataset, tokenizer=tokenizer, ) ``` ### Starting the training loop Because the `PPOTrainer` needs an active `reward` per execution step, we need to define a method to get rewards during each step of the PPO algorithm. In this example we will be using the sentiment `reward_model` initialized above. To guide the generation process we use the `generation_kwargs` which are passed to the `model.generate` method for the SFT-model during each step. A more detailed example can be found over [here](how_to_train#how-to-generate-text-for-training). ```py generation_kwargs = { "min_length": -1, "top_k": 0.0, "top_p": 1.0, "do_sample": True, "pad_token_id": tokenizer.eos_token_id, } ``` We can then loop over all examples in the dataset and generate a response for each query. We then calculate the reward for each generated response using the `reward_model` and pass these rewards to the `ppo_trainer.step` method. The `ppo_trainer.step` method will then optimize the SFT model using the PPO algorithm. 
```py
import torch
from tqdm import tqdm

epochs = 10
for epoch in tqdm(range(epochs), "epoch: "):
    for batch in tqdm(ppo_trainer.dataloader):
        query_tensors = batch["input_ids"]

        #### Get response from SFTModel
        response_tensors = ppo_trainer.generate(query_tensors, **generation_kwargs)
        batch["response"] = [tokenizer.decode(r.squeeze()) for r in response_tensors]

        #### Compute reward score
        texts = [q + r for q, r in zip(batch["query"], batch["response"])]
        pipe_outputs = reward_model(texts)
        rewards = [torch.tensor(output[1]["score"]) for output in pipe_outputs]

        #### Run PPO step
        stats = ppo_trainer.step(query_tensors, response_tensors, rewards)
        ppo_trainer.log_stats(stats, batch, rewards)

#### Save model
ppo_trainer.save_pretrained("my_ppo_model")
```

## Logging

While training and evaluating we log the following metrics:

- `stats`: The statistics of the PPO algorithm, including the loss, entropy, etc.
- `batch`: The batch of data used to train the SFT model.
- `rewards`: The rewards obtained from the Reward model.

## PPOTrainer

[[autodoc]] PPOTrainer

[[autodoc]] PPOConfig
trl/docs/source/ppo_trainer.mdx/0
{ "file_path": "trl/docs/source/ppo_trainer.mdx", "repo_id": "trl", "token_count": 2435 }
466
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Optional import torch from accelerate import Accelerator from datasets import load_dataset from peft import LoraConfig from tqdm import tqdm from transformers import Adafactor, AutoTokenizer, HfArgumentParser, pipeline from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer, set_seed from trl.core import LengthSampler tqdm.pandas() @dataclass class ScriptArguments: """ The name of the Casual LM model we wish to fine-tune with PPO """ # NOTE: gpt2 models use Conv1D instead of Linear layers which are not yet supported in 8 bit mode # models like gpt-neo* models are more suitable. model_name: Optional[str] = field(default="", metadata={"help": "the model name"}) tokenizer_name: Optional[str] = field(default="", metadata={"help": "the tokenizer name"}) reward_model_name: Optional[str] = field(default="", metadata={"help": "the reward model name"}) log_with: Optional[str] = field(default=None, metadata={"help": "use 'wandb' to log with wandb"}) learning_rate: Optional[float] = field(default=1.41e-5, metadata={"help": "the learning rate"}) output_max_length: Optional[int] = field(default=128, metadata={"help": "maximum length for generation"}) mini_batch_size: Optional[int] = field(default=1, metadata={"help": "the PPO minibatch size"}) batch_size: Optional[int] = field(default=32, metadata={"help": "the batch size"}) ppo_epochs: Optional[int] = field(default=4, metadata={"help": "the number of ppo epochs"}) gradient_accumulation_steps: Optional[int] = field( default=4, metadata={"help": "the number of gradient accumulation steps"} ) adafactor: Optional[bool] = field(default=False, metadata={"help": "whether to use the adafactor optimizer"}) early_stopping: Optional[bool] = field(default=False, metadata={"help": "whether to early stop"}) target_kl: Optional[float] = field(default=0.1, metadata={"help": "kl target for early stopping"}) reward_baseline: Optional[float] = field( default=0.0, metadata={"help": "a baseline value that is subtracted from the reward"}, ) batched_gen: Optional[bool] = field(default=False, metadata={"help": "whether to use the batched text gen"}) save_freq: Optional[int] = field(default=None, metadata={"help": "n steps to save the model"}) output_dir: Optional[str] = field(default="runs/", metadata={"help": "n steps to save the model"}) seed: Optional[int] = field(default=0, metadata={"help": "the seed"}) steps: Optional[int] = field(default=20000, metadata={"help": "number of epochs"}) init_kl_coef: Optional[float] = field( default=0.2, metadata={"help": "Initial KL penalty coefficient (used for adaptive and linear control)"}, ) adap_kl_ctrl: Optional[bool] = field(default=True, metadata={"help": "Use adaptive KL control, otherwise linear"}) load_in_8bit: Optional[bool] = field(default=True, metadata={"help": "whether to load the model in 8bit"}) parser = HfArgumentParser(ScriptArguments) 
script_args: ScriptArguments = parser.parse_args_into_dataclasses()[0] reward_model_name = script_args.reward_model_name dataset_name = "lvwerra/stack-exchange-paired" config = PPOConfig( steps=script_args.steps, model_name=script_args.model_name, learning_rate=script_args.learning_rate, log_with=script_args.log_with, batch_size=script_args.batch_size, mini_batch_size=script_args.mini_batch_size, gradient_accumulation_steps=script_args.gradient_accumulation_steps, optimize_cuda_cache=True, early_stopping=script_args.early_stopping, target_kl=script_args.target_kl, ppo_epochs=script_args.ppo_epochs, seed=script_args.seed, init_kl_coef=script_args.init_kl_coef, adap_kl_ctrl=script_args.adap_kl_ctrl, ) train_dataset = load_dataset( "lvwerra/stack-exchange-paired", data_dir="data/rl", split="train", verification_mode="no_checks" ) train_dataset = train_dataset.select(range(100000)) original_columns = train_dataset.column_names # We then define the arguments to pass to the sentiment analysis pipeline. # We set `return_all_scores` to True to get the sentiment score for each token. sent_kwargs = { "return_all_scores": True, "function_to_apply": "none", "batch_size": 16, "truncation": True, } tokenizer = AutoTokenizer.from_pretrained(script_args.tokenizer_name) # GPT-2 tokenizer has a pad token, but it is not eos_token by default. We need to set it to eos_token. # only for this model. if getattr(tokenizer, "pad_token", None) is None: tokenizer.pad_token = tokenizer.eos_token # Below is an example function to build the dataset. In our case, we use the IMDB dataset # from the `datasets` library. One should customize this function to train the model on # its own dataset. def build_dataset( tokenizer, dataset_name="lvwerra/stack-exchange-paired", ): """ Build dataset for training. This builds the dataset from `load_dataset`, one should customize this function to train the model on its own dataset. Args: dataset_name (`str`): The name of the dataset to be loaded. Returns: dataloader (`torch.utils.data.DataLoader`): The dataloader for the dataset. """ num_proc = 24 def preprocess_function(examples): new_examples = { "query": [], "input_ids": [], } for question in examples["question"]: query = "Question: " + question + "\n\nAnswer: " tokenized_question = tokenizer(query, truncation=True) new_examples["query"].append(query) new_examples["input_ids"].append(tokenized_question["input_ids"]) return new_examples ds = train_dataset.map( preprocess_function, batched=True, num_proc=num_proc, remove_columns=original_columns, ) ds = ds.filter(lambda x: len(x["input_ids"]) < 512, batched=False, num_proc=num_proc) ds.set_format(type="torch") return ds # We retrieve the dataloader by calling the `build_dataset` function. dataset = build_dataset(tokenizer) def collator(data): return {key: [d[key] for d in data] for key in data[0]} # set seed before initializing value head for deterministic eval set_seed(config.seed) # Now let's build the model, the reference model, and the tokenizer. 
current_device = Accelerator().local_process_index lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = AutoModelForCausalLMWithValueHead.from_pretrained( config.model_name, load_in_8bit=script_args.load_in_8bit, device_map={"": current_device}, peft_config=lora_config, ) optimizer = None if script_args.adafactor: optimizer = Adafactor( filter(lambda p: p.requires_grad, model.parameters()), scale_parameter=False, relative_step=False, warmup_init=False, lr=config.learning_rate, ) # We then build the PPOTrainer, passing the model, the reference model, the tokenizer ppo_trainer = PPOTrainer( config, model, ref_model=None, tokenizer=tokenizer, dataset=dataset, data_collator=collator, optimizer=optimizer, ) # We then build the sentiment analysis pipeline using our reward model, passing the # model name and the sentiment analysis pipeline arguments. Let's also make sure to # set the device to the same device as the PPOTrainer. device = ppo_trainer.accelerator.device if ppo_trainer.accelerator.num_processes == 1: device = 0 if torch.cuda.is_available() else "cpu" # to avoid a ` pipeline` bug sentiment_pipe = pipeline( "sentiment-analysis", model=reward_model_name, device_map={"": current_device}, model_kwargs={"load_in_8bit": script_args.load_in_8bit}, tokenizer=tokenizer, return_token_type_ids=False, ) if sentiment_pipe.model.config.pad_token_id is None: sentiment_pipe.model.config.pad_token_id = sentiment_pipe.model.config.eos_token_id # We then define the arguments to pass to the `generate` function. These arguments # are passed to the `generate` function of the PPOTrainer, which is a wrapper around # the `generate` function of the trained model. generation_kwargs = { # "min_length": -1, "top_k": 0.0, "top_p": 1.0, "do_sample": True, "pad_token_id": tokenizer.pad_token_id, "eos_token_id": 100_000, } output_min_length = 32 output_max_length = script_args.output_max_length output_length_sampler = LengthSampler(output_min_length, output_max_length) for epoch, batch in tqdm(enumerate(ppo_trainer.dataloader)): if epoch >= config.total_ppo_epochs: break question_tensors = batch["input_ids"] response_tensors = ppo_trainer.generate( question_tensors, return_prompt=False, length_sampler=output_length_sampler, **generation_kwargs, ) batch["response"] = tokenizer.batch_decode(response_tensors, skip_special_tokens=True) # Compute reward score (using the sentiment analysis pipeline) texts = [q + r for q, r in zip(batch["query"], batch["response"])] pipe_outputs = sentiment_pipe(texts, **sent_kwargs) rewards = [torch.tensor(output[0]["score"] - script_args.reward_baseline) for output in pipe_outputs] # Run PPO step stats = ppo_trainer.step(question_tensors, response_tensors, rewards) ppo_trainer.log_stats(stats, batch, rewards) if script_args.save_freq and epoch and epoch % script_args.save_freq == 0: ppo_trainer.save_pretrained(script_args.output_dir + f"step_{epoch}")
trl/examples/research_projects/stack_llama/scripts/rl_training.py/0
{ "file_path": "trl/examples/research_projects/stack_llama/scripts/rl_training.py", "repo_id": "trl", "token_count": 3756 }
467
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Run the CPO training script with the following command with some example arguments. In general, the optimal configuration for CPO will be similar to that of DPO: # regular: python examples/scripts/cpo.py \ --model_name_or_path=gpt2 \ --per_device_train_batch_size 4 \ --max_steps 1000 \ --learning_rate 8e-6 \ --gradient_accumulation_steps 1 \ --logging_steps 10 \ --eval_steps 500 \ --output_dir="gpt2-aligned-cpo" \ --warmup_steps 150 \ --report_to wandb \ --bf16 \ --logging_first_step \ --no_remove_unused_columns # peft: python examples/scripts/cpo.py \ --model_name_or_path=gpt2 \ --per_device_train_batch_size 4 \ --max_steps 1000 \ --learning_rate 8e-5 \ --gradient_accumulation_steps 1 \ --logging_steps 10 \ --eval_steps 500 \ --output_dir="gpt2-lora-aligned-cpo" \ --optim rmsprop \ --warmup_steps 150 \ --report_to wandb \ --bf16 \ --logging_first_step \ --no_remove_unused_columns \ --use_peft \ --lora_r=16 \ --lora_alpha=16 """ from dataclasses import dataclass, field from accelerate import PartialState from datasets import load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser from trl import CPOConfig, CPOTrainer, ModelConfig, get_peft_config @dataclass class ScriptArguments: dataset: str = field( default="trl-internal-testing/hh-rlhf-helpful-base-trl-style", metadata={"help": "The name of the dataset to use."}, ) if __name__ == "__main__": parser = HfArgumentParser((ScriptArguments, CPOConfig, ModelConfig)) args, cpo_args, model_config = parser.parse_args_into_dataclasses() ################ # Model & Tokenizer ################ model = AutoModelForCausalLM.from_pretrained( model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code ) tokenizer = AutoTokenizer.from_pretrained( model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code ) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token ################ # Dataset ################ ds = load_dataset(args.dataset) if cpo_args.debug: for key in ds: ds[key] = ds[key].select(range(50)) if tokenizer.chat_template is None: tokenizer.chat_template = "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" def process(row): row["chosen"] = tokenizer.apply_chat_template(row["chosen"], tokenize=False) row["rejected"] = tokenizer.apply_chat_template(row["rejected"], tokenize=False) return row # Compute that only on the main process for faster data processing. 
# see: https://github.com/huggingface/trl/pull/1255 with PartialState().local_main_process_first(): ds = ds.map(process, num_proc=cpo_args.dataset_num_proc) train_dataset = ds["train"] eval_dataset = ds["test"] ################ # Training ################ trainer = CPOTrainer( model, args=cpo_args, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, peft_config=get_peft_config(model_config), ) # train and save the model trainer.train() trainer.save_model(cpo_args.output_dir)
trl/examples/scripts/cpo.py/0
{ "file_path": "trl/examples/scripts/cpo.py", "repo_id": "trl", "token_count": 1627 }
468
# flake8: noqa # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ pip install pillow python examples/scripts/vsft_llava.py \ --dataset_name HuggingFaceH4/llava-instruct-mix-vsft \ --model_name_or_path llava-hf/llava-1.5-7b-hf \ --per_device_train_batch_size 8 \ --gradient_accumulation_steps 8 \ --output_dir sft-llava-1.5-7b-hf \ --bf16 \ --torch_dtype bfloat16 \ --gradient_checkpointing \ --use_peft \ --dataloader_num_workers 32 \ --lora_target_modules=all-linear For LLaVA-NeXT, use: (requires transformers>=4.45) --model_name_or_path llava-hf/llava-v1.6-mistral-7b-hf """ import logging import os from contextlib import nullcontext from trl.commands.cli_utils import init_zero_verbose, SFTScriptArguments, TrlParser from trl.env_utils import strtobool TRL_USE_RICH = strtobool(os.getenv("TRL_USE_RICH", "0")) if TRL_USE_RICH: init_zero_verbose() FORMAT = "%(message)s" from rich.console import Console from rich.logging import RichHandler import torch from accelerate import Accelerator from datasets import load_dataset from tqdm.rich import tqdm from transformers import AutoModelForVision2Seq, AutoProcessor from trl import ( ModelConfig, RichProgressCallback, SFTConfig, SFTTrainer, get_peft_config, get_quantization_config, get_kbit_device_map, ) tqdm.pandas() if TRL_USE_RICH: logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()], level=logging.INFO) if __name__ == "__main__": parser = TrlParser((SFTScriptArguments, SFTConfig, ModelConfig)) sft_script_args, training_args, model_config = parser.parse_args_and_config() training_args.gradient_checkpointing_kwargs = dict(use_reentrant=False) training_args.dataset_text_field = "" # need a dummy field training_args.remove_unused_columns = False training_args.dataset_kwargs = {"skip_prepare_dataset": True} # Force use our print callback if TRL_USE_RICH: training_args.disable_tqdm = True console = Console() ################ # Model, Tokenizer & Processor ################ torch_dtype = ( model_config.torch_dtype if model_config.torch_dtype in ["auto", None] else getattr(torch, model_config.torch_dtype) ) quantization_config = get_quantization_config(model_config) model_kwargs = dict( revision=model_config.model_revision, attn_implementation=model_config.attn_implementation, torch_dtype=torch_dtype, device_map=get_kbit_device_map() if quantization_config is not None else None, quantization_config=quantization_config, ) processor = AutoProcessor.from_pretrained( model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code ) model = AutoModelForVision2Seq.from_pretrained( model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code, **model_kwargs ) ################ # Create a data collator to encode text and image pairs ################ def collate_fn(examples): # Get the texts and images, and apply the chat template texts = [processor.apply_chat_template(example["messages"], tokenize=False) for example in examples] images = [example["images"][0] 
for example in examples] # Tokenize the texts and process the images batch = processor(texts, images, return_tensors="pt", padding=True) # The labels are the input_ids, and we mask the padding tokens in the loss computation labels = batch["input_ids"].clone() labels[labels == processor.tokenizer.pad_token_id] = -100 batch["labels"] = labels return batch ################ # Dataset ################ raw_datasets = load_dataset(sft_script_args.dataset_name) train_dataset = raw_datasets[sft_script_args.dataset_train_split] eval_dataset = raw_datasets[sft_script_args.dataset_test_split] ################ # Optional rich context managers ############### init_context = nullcontext() if not TRL_USE_RICH else console.status("[bold green]Initializing the SFTTrainer...") save_context = ( nullcontext() if not TRL_USE_RICH else console.status(f"[bold green]Training completed! Saving the model to {training_args.output_dir}") ) ################ # Training ################ with init_context: trainer = SFTTrainer( model=model, args=training_args, data_collator=collate_fn, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=processor.tokenizer, peft_config=get_peft_config(model_config), callbacks=[RichProgressCallback] if TRL_USE_RICH else None, ) trainer.train() with save_context: trainer.save_model(training_args.output_dir) trainer.push_to_hub() if Accelerator().is_main_process: processor.push_to_hub(training_args.hub_model_id)
trl/examples/scripts/vsft_llava.py/0
{ "file_path": "trl/examples/scripts/vsft_llava.py", "repo_id": "trl", "token_count": 2204 }
469
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import sys
import unittest


@unittest.skipIf(sys.platform.startswith("win"), "Skipping on Windows")
def test_sft_cli():
    try:
        subprocess.run(
            "trl sft --max_steps 1 --output_dir tmp-sft --model_name_or_path trl-internal-testing/tiny-random-LlamaForCausalLM --dataset_name imdb --learning_rate 1e-4 --lr_scheduler_type cosine --dataset_text_field text",
            shell=True,
            check=True,
        )
    except BaseException as exc:
        raise AssertionError("An error occurred while running the CLI, please double check") from exc


@unittest.skipIf(sys.platform.startswith("win"), "Skipping on Windows")
def test_dpo_cli():
    try:
        subprocess.run(
            "trl dpo --max_steps 1 --output_dir tmp-dpo --model_name_or_path trl-internal-testing/tiny-random-LlamaForCausalLM --dataset_name trl-internal-testing/hh-rlhf-helpful-base-trl-style --learning_rate 1e-4 --lr_scheduler_type cosine --sanity_check",
            shell=True,
            check=True,
        )
    except BaseException as exc:
        raise AssertionError("An error occurred while running the CLI, please double check") from exc
trl/tests/test_cli.py/0
{ "file_path": "trl/tests/test_cli.py", "repo_id": "trl", "token_count": 637 }
470
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest import torch from transformers import AutoModelForCausalLM from trl import AutoModelForCausalLMWithValueHead, is_peft_available if is_peft_available(): from peft import LoraConfig, get_peft_model from .testing_utils import require_bitsandbytes, require_peft @require_peft class PeftModelTester(unittest.TestCase): def setUp(self): self.causal_lm_model_id = "trl-internal-testing/tiny-random-GPTNeoXForCausalLM" self.lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) def test_create_peft_model(self): r""" Simply creates a peft model and checks that it can be loaded. """ causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) pretrained_model = get_peft_model(causal_lm_model, self.lora_config) _ = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model) def test_peft_requires_grad(self): r""" Check that the value head of the returned model has requires_grad=True. """ causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) pretrained_model = get_peft_model(causal_lm_model, self.lora_config) model = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model) # Check that the value head has requires_grad=True assert model.v_head.summary.weight.requires_grad def test_check_peft_model_nb_trainable_params(self): r""" Check that the number of trainable parameters is correct. """ causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) pretrained_model = get_peft_model(causal_lm_model, self.lora_config) model = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model) # Check that the number of trainable parameters is correct nb_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad) assert nb_trainable_params == 10273 # Check that the number of trainable param for the non-peft model is correct non_peft_model = AutoModelForCausalLMWithValueHead.from_pretrained(self.causal_lm_model_id) nb_trainable_params = sum(p.numel() for p in non_peft_model.parameters() if p.requires_grad) assert nb_trainable_params == 99578 def test_create_peft_model_from_config(self): r""" Simply creates a peft model and checks that it can be loaded. 
""" trl_model = AutoModelForCausalLMWithValueHead.from_pretrained( self.causal_lm_model_id, peft_config=self.lora_config ) # Check that the number of trainable parameters is correct nb_trainable_params = sum(p.numel() for p in trl_model.parameters() if p.requires_grad) assert nb_trainable_params == 10273 causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) trl_model = AutoModelForCausalLMWithValueHead.from_pretrained(causal_lm_model, peft_config=self.lora_config) # Check that the number of trainable parameters is correct nb_trainable_params = sum(p.numel() for p in trl_model.parameters() if p.requires_grad) assert nb_trainable_params == 10273 @require_bitsandbytes def test_create_bnb_peft_model_from_config(self): r""" Simply creates a peft model and checks that it can be loaded. """ from bitsandbytes.nn import Linear8bitLt trl_model = AutoModelForCausalLMWithValueHead.from_pretrained( self.causal_lm_model_id, peft_config=self.lora_config, load_in_8bit=True ) # Check that the number of trainable parameters is correct nb_trainable_params = sum(p.numel() for p in trl_model.parameters() if p.requires_grad) assert nb_trainable_params == 10273 assert trl_model.pretrained_model.model.gpt_neox.layers[0].mlp.dense_h_to_4h.__class__ == Linear8bitLt causal_lm_model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, load_in_8bit=True, device_map="auto" ) trl_model = AutoModelForCausalLMWithValueHead.from_pretrained(causal_lm_model, peft_config=self.lora_config) # Check that the number of trainable parameters is correct nb_trainable_params = sum(p.numel() for p in trl_model.parameters() if p.requires_grad) assert nb_trainable_params == 10273 assert trl_model.pretrained_model.model.gpt_neox.layers[0].mlp.dense_h_to_4h.__class__ == Linear8bitLt def test_save_pretrained_peft(self): r""" Check that the model can be saved and loaded properly. """ causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) pretrained_model = get_peft_model(causal_lm_model, self.lora_config) model = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) # check that the files `adapter_model.safetensors` and `adapter_config.json` are in the directory assert os.path.isfile( f"{tmp_dir}/adapter_model.safetensors" ), f"{tmp_dir}/adapter_model.safetensors does not exist" assert os.path.exists(f"{tmp_dir}/adapter_config.json"), f"{tmp_dir}/adapter_config.json does not exist" # check also for `pytorch_model.bin` and make sure it only contains `v_head` weights assert os.path.exists(f"{tmp_dir}/pytorch_model.bin"), f"{tmp_dir}/pytorch_model.bin does not exist" maybe_v_head = torch.load(f"{tmp_dir}/pytorch_model.bin", weights_only=True) # check that only keys that starts with `v_head` are in the dict assert all( k.startswith("v_head") for k in maybe_v_head.keys() ), f"keys in {tmp_dir}/pytorch_model.bin do not start with `v_head`" model_from_pretrained = AutoModelForCausalLMWithValueHead.from_pretrained(tmp_dir) # check all the weights are the same for p1, p2 in zip(model.named_parameters(), model_from_pretrained.named_parameters()): assert torch.allclose(p1[1], p2[1]), f"{p1[0]} != {p2[0]}" def test_load_pretrained_peft(self): r""" Check that the model saved with peft class interface can be loaded properly. 
""" causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) pretrained_model = get_peft_model(causal_lm_model, self.lora_config) model = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model) with tempfile.TemporaryDirectory() as tmp_dir: pretrained_model.save_pretrained(tmp_dir) model_from_pretrained = AutoModelForCausalLMWithValueHead.from_pretrained(tmp_dir) # check that the files `adapter_model.safetensors` and `adapter_config.json` are in the directory assert os.path.isfile( f"{tmp_dir}/adapter_model.safetensors" ), f"{tmp_dir}/adapter_model.safetensors does not exist" assert os.path.exists(f"{tmp_dir}/adapter_config.json"), f"{tmp_dir}/adapter_config.json does not exist" # check all the weights are the same for p1, p2 in zip(model.named_parameters(), model_from_pretrained.named_parameters()): if p1[0] not in ["v_head.summary.weight", "v_head.summary.bias"]: assert torch.allclose(p1[1], p2[1]), f"{p1[0]} != {p2[0]}" def test_continue_training_peft_model(self): r""" Load peft and checks that it can continue training. """ causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) pretrained_model = get_peft_model(causal_lm_model, self.lora_config) with tempfile.TemporaryDirectory() as tmp_dir: pretrained_model.save_pretrained(tmp_dir) # set is_trainable to True model = AutoModelForCausalLMWithValueHead.from_pretrained(tmp_dir, is_trainable=True) # Check that the number of trainable parameters is correct nb_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad) assert nb_trainable_params == 10273
trl/tests/test_peft_models.py/0
{ "file_path": "trl/tests/test_peft_models.py", "repo_id": "trl", "token_count": 3824 }
471
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Function `strtobool` copied and adapted from `distutils` (as deprecated
# in Python 3.10).
# Reference: https://github.com/python/cpython/blob/48f9d3e3faec5faaa4f7c9849fecd27eae4da213/Lib/distutils/util.py#L308-L321


def strtobool(val: str) -> bool:
    """Convert a string representation of truth to True or False booleans.

    True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values are 'n', 'no', 'f', 'false', 'off', and '0'.

    Raises:
        ValueError: if 'val' is anything else.
    """
    val = val.lower()
    if val in ("y", "yes", "t", "true", "on", "1"):
        return True
    if val in ("n", "no", "f", "false", "off", "0"):
        return False
    raise ValueError(f"Invalid truth value, it should be a string but {val} was provided instead.")
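# --- Editor's usage sketch (not part of the original module) -------------------
# `strtobool` is handy for parsing boolean-like environment variables; the
# variable name below is only an illustration, not a real TRL setting.
import os

use_fancy_logging = strtobool(os.environ.get("USE_FANCY_LOGGING", "false"))  # False unless the env var is truthy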
trl/trl/env_utils.py/0
{ "file_path": "trl/trl/env_utils.py", "repo_id": "trl", "token_count": 474 }
472
# Copyright 2023 AlignProp-pytorch authors (Mihir Prabhudesai), metric-space, The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import warnings from collections import defaultdict from typing import Any, Callable, Optional, Tuple from warnings import warn import torch from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from huggingface_hub import whoami from ..models import DDPOStableDiffusionPipeline from . import AlignPropConfig, BaseTrainer logger = get_logger(__name__) MODEL_CARD_TEMPLATE = """--- license: apache-2.0 library_name: transformers tags: - trl - alignprop - diffusers - reinforcement-learning - text-to-image - stable-diffusion --- # {model_name} This is a pipeline that finetunes a diffusion model with reward backpropagation while using randomized truncation (https://huggingface.co/papers/2310.03739). The model can be used for image generation conditioned with text. """ class AlignPropTrainer(BaseTrainer): """ The AlignPropTrainer uses Deep Diffusion Policy Optimization to optimise diffusion models. Note, this trainer is heavily inspired by the work here: https://github.com/mihirp1998/AlignProp/ As of now only Stable Diffusion based pipelines are supported Attributes: **config** (`AlignPropConfig`) -- Configuration object for AlignPropTrainer. Check the documentation of `PPOConfig` for more details. **reward_function** (Callable[[torch.Tensor, Tuple[str], Tuple[Any]], torch.Tensor]) -- Reward function to be used **prompt_function** (Callable[[], Tuple[str, Any]]) -- Function to generate prompts to guide model **sd_pipeline** (`DDPOStableDiffusionPipeline`) -- Stable Diffusion pipeline to be used for training. 
**image_samples_hook** (Optional[Callable[[Any, Any, Any], Any]]) -- Hook to be called to log images """ _tag_names = ["trl", "alignprop"] def __init__( self, config: AlignPropConfig, reward_function: Callable[[torch.Tensor, Tuple[str], Tuple[Any]], torch.Tensor], prompt_function: Callable[[], Tuple[str, Any]], sd_pipeline: DDPOStableDiffusionPipeline, image_samples_hook: Optional[Callable[[Any, Any, Any], Any]] = None, ): if image_samples_hook is None: warn("No image_samples_hook provided; no images will be logged") self.prompt_fn = prompt_function self.reward_fn = reward_function self.config = config self.image_samples_callback = image_samples_hook accelerator_project_config = ProjectConfiguration(**self.config.project_kwargs) if self.config.resume_from: self.config.resume_from = os.path.normpath(os.path.expanduser(self.config.resume_from)) if "checkpoint_" not in os.path.basename(self.config.resume_from): # get the most recent checkpoint in this directory checkpoints = list( filter( lambda x: "checkpoint_" in x, os.listdir(self.config.resume_from), ) ) if len(checkpoints) == 0: raise ValueError(f"No checkpoints found in {self.config.resume_from}") checkpoint_numbers = sorted([int(x.split("_")[-1]) for x in checkpoints]) self.config.resume_from = os.path.join( self.config.resume_from, f"checkpoint_{checkpoint_numbers[-1]}", ) accelerator_project_config.iteration = checkpoint_numbers[-1] + 1 self.accelerator = Accelerator( log_with=self.config.log_with, mixed_precision=self.config.mixed_precision, project_config=accelerator_project_config, # we always accumulate gradients across timesteps; we want config.train.gradient_accumulation_steps to be the # number of *samples* we accumulate across, so we need to multiply by the number of training timesteps to get # the total number of optimizer steps to accumulate across. gradient_accumulation_steps=self.config.train_gradient_accumulation_steps, **self.config.accelerator_kwargs, ) is_using_tensorboard = config.log_with is not None and config.log_with == "tensorboard" if self.accelerator.is_main_process: self.accelerator.init_trackers( self.config.tracker_project_name, config=dict(alignprop_trainer_config=config.to_dict()) if not is_using_tensorboard else config.to_dict(), init_kwargs=self.config.tracker_kwargs, ) logger.info(f"\n{config}") set_seed(self.config.seed, device_specific=True) self.sd_pipeline = sd_pipeline self.sd_pipeline.set_progress_bar_config( position=1, disable=not self.accelerator.is_local_main_process, leave=False, desc="Timestep", dynamic_ncols=True, ) # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. 
if self.accelerator.mixed_precision == "fp16": inference_dtype = torch.float16 elif self.accelerator.mixed_precision == "bf16": inference_dtype = torch.bfloat16 else: inference_dtype = torch.float32 self.sd_pipeline.vae.to(self.accelerator.device, dtype=inference_dtype) self.sd_pipeline.text_encoder.to(self.accelerator.device, dtype=inference_dtype) self.sd_pipeline.unet.to(self.accelerator.device, dtype=inference_dtype) trainable_layers = self.sd_pipeline.get_trainable_layers() self.accelerator.register_save_state_pre_hook(self._save_model_hook) self.accelerator.register_load_state_pre_hook(self._load_model_hook) # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if self.config.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True self.optimizer = self._setup_optimizer( trainable_layers.parameters() if not isinstance(trainable_layers, list) else trainable_layers ) self.neg_prompt_embed = self.sd_pipeline.text_encoder( self.sd_pipeline.tokenizer( [""] if self.config.negative_prompts is None else self.config.negative_prompts, return_tensors="pt", padding="max_length", truncation=True, max_length=self.sd_pipeline.tokenizer.model_max_length, ).input_ids.to(self.accelerator.device) )[0] # NOTE: for some reason, autocast is necessary for non-lora training but for lora training it isn't necessary and it uses # more memory self.autocast = self.sd_pipeline.autocast or self.accelerator.autocast if hasattr(self.sd_pipeline, "use_lora") and self.sd_pipeline.use_lora: unet, self.optimizer = self.accelerator.prepare(trainable_layers, self.optimizer) self.trainable_layers = list(filter(lambda p: p.requires_grad, unet.parameters())) else: self.trainable_layers, self.optimizer = self.accelerator.prepare(trainable_layers, self.optimizer) if config.resume_from: logger.info(f"Resuming from {config.resume_from}") self.accelerator.load_state(config.resume_from) self.first_epoch = int(config.resume_from.split("_")[-1]) + 1 else: self.first_epoch = 0 def compute_rewards(self, prompt_image_pairs): reward, reward_metadata = self.reward_fn( prompt_image_pairs["images"], prompt_image_pairs["prompts"], prompt_image_pairs["prompt_metadata"] ) return reward def step(self, epoch: int, global_step: int): """ Perform a single step of training. Args: epoch (int): The current epoch. global_step (int): The current global step. Side Effects: - Model weights are updated - Logs the statistics to the accelerator trackers. - If `self.image_samples_callback` is not None, it will be called with the prompt_image_pairs, global_step, and the accelerator tracker. Returns: global_step (int): The updated global step. 
""" info = defaultdict(list) self.sd_pipeline.unet.train() for _ in range(self.config.train_gradient_accumulation_steps): with self.accelerator.accumulate(self.sd_pipeline.unet), self.autocast(), torch.enable_grad(): prompt_image_pairs = self._generate_samples( batch_size=self.config.train_batch_size, ) rewards = self.compute_rewards(prompt_image_pairs) prompt_image_pairs["rewards"] = rewards rewards_vis = self.accelerator.gather(rewards).detach().cpu().numpy() loss = self.calculate_loss(rewards) self.accelerator.backward(loss) if self.accelerator.sync_gradients: self.accelerator.clip_grad_norm_( self.trainable_layers.parameters() if not isinstance(self.trainable_layers, list) else self.trainable_layers, self.config.train_max_grad_norm, ) self.optimizer.step() self.optimizer.zero_grad() info["reward_mean"].append(rewards_vis.mean()) info["reward_std"].append(rewards_vis.std()) info["loss"].append(loss.item()) # Checks if the accelerator has performed an optimization step behind the scenes if self.accelerator.sync_gradients: # log training-related stuff info = {k: torch.mean(torch.tensor(v)) for k, v in info.items()} info = self.accelerator.reduce(info, reduction="mean") info.update({"epoch": epoch}) self.accelerator.log(info, step=global_step) global_step += 1 info = defaultdict(list) else: raise ValueError( "Optimization step should have been performed by this point. Please check calculated gradient accumulation settings." ) # Logs generated images if self.image_samples_callback is not None and global_step % self.config.log_image_freq == 0: self.image_samples_callback(prompt_image_pairs, global_step, self.accelerator.trackers[0]) if epoch != 0 and epoch % self.config.save_freq == 0 and self.accelerator.is_main_process: self.accelerator.save_state() return global_step def calculate_loss(self, rewards): """ Calculate the loss for a batch of an unpacked sample Args: rewards (torch.Tensor): Differentiable reward scalars for each generated image, shape: [batch_size] Returns: loss (torch.Tensor) (all of these are of shape (1,)) """ # Loss is specific to Aesthetic Reward function used in AlignProp (https://huggingface.co/papers/2310.03739) loss = 10.0 - (rewards).mean() return loss def loss( self, advantages: torch.Tensor, clip_range: float, ratio: torch.Tensor, ): unclipped_loss = -advantages * ratio clipped_loss = -advantages * torch.clamp( ratio, 1.0 - clip_range, 1.0 + clip_range, ) return torch.mean(torch.maximum(unclipped_loss, clipped_loss)) def _setup_optimizer(self, trainable_layers_parameters): if self.config.train_use_8bit_adam: import bitsandbytes optimizer_cls = bitsandbytes.optim.AdamW8bit else: optimizer_cls = torch.optim.AdamW return optimizer_cls( trainable_layers_parameters, lr=self.config.train_learning_rate, betas=(self.config.train_adam_beta1, self.config.train_adam_beta2), weight_decay=self.config.train_adam_weight_decay, eps=self.config.train_adam_epsilon, ) def _save_model_hook(self, models, weights, output_dir): self.sd_pipeline.save_checkpoint(models, weights, output_dir) weights.pop() # ensures that accelerate doesn't try to handle saving of the model def _load_model_hook(self, models, input_dir): self.sd_pipeline.load_checkpoint(models, input_dir) models.pop() # ensures that accelerate doesn't try to handle loading of the model def _generate_samples(self, batch_size, with_grad=True, prompts=None): """ Generate samples from the model Args: batch_size (int): Batch size to use for sampling with_grad (bool): Whether the generated RGBs should have gradients attached to it. 
Returns: prompt_image_pairs (Dict[Any]) """ prompt_image_pairs = {} sample_neg_prompt_embeds = self.neg_prompt_embed.repeat(batch_size, 1, 1) if prompts is None: prompts, prompt_metadata = zip(*[self.prompt_fn() for _ in range(batch_size)]) else: prompt_metadata = [{} for _ in range(batch_size)] prompt_ids = self.sd_pipeline.tokenizer( prompts, return_tensors="pt", padding="max_length", truncation=True, max_length=self.sd_pipeline.tokenizer.model_max_length, ).input_ids.to(self.accelerator.device) prompt_embeds = self.sd_pipeline.text_encoder(prompt_ids)[0] if with_grad: sd_output = self.sd_pipeline.rgb_with_grad( prompt_embeds=prompt_embeds, negative_prompt_embeds=sample_neg_prompt_embeds, num_inference_steps=self.config.sample_num_steps, guidance_scale=self.config.sample_guidance_scale, eta=self.config.sample_eta, truncated_backprop_rand=self.config.truncated_backprop_rand, truncated_backprop_timestep=self.config.truncated_backprop_timestep, truncated_rand_backprop_minmax=self.config.truncated_rand_backprop_minmax, output_type="pt", ) else: sd_output = self.sd_pipeline( prompt_embeds=prompt_embeds, negative_prompt_embeds=sample_neg_prompt_embeds, num_inference_steps=self.config.sample_num_steps, guidance_scale=self.config.sample_guidance_scale, eta=self.config.sample_eta, output_type="pt", ) images = sd_output.images prompt_image_pairs["images"] = images prompt_image_pairs["prompts"] = prompts prompt_image_pairs["prompt_metadata"] = prompt_metadata return prompt_image_pairs def train(self, epochs: Optional[int] = None): """ Train the model for a given number of epochs """ global_step = 0 if epochs is None: epochs = self.config.num_epochs for epoch in range(self.first_epoch, epochs): global_step = self.step(epoch, global_step) def create_model_card(self, path: str, model_name: Optional[str] = "TRL AlignProp Model") -> None: """Creates and saves a model card for a TRL model. Args: path (`str`): The path to save the model card to. model_name (`str`, *optional*): The name of the model, defaults to `TRL AlignProp Model`. """ try: user = whoami()["name"] # handle the offline case except Exception: warnings.warn("Cannot retrieve user information assuming you are running in offline mode.") return if not os.path.exists(path): os.makedirs(path) model_card_content = MODEL_CARD_TEMPLATE.format(model_name=model_name, model_id=f"{user}/{path}") with open(os.path.join(path, "README.md"), "w", encoding="utf-8") as f: f.write(model_card_content) def _save_pretrained(self, save_directory): self.sd_pipeline.save_pretrained(save_directory) self.create_model_card(save_directory)
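# --- Editor's usage sketch (not part of the original module) -------------------
# Assumptions: `DefaultDDPOStableDiffusionPipeline` is used as the concrete
# pipeline class, the Stable Diffusion checkpoint name is a placeholder, and the
# reward is a dummy differentiable scalar per image; a real setup would plug in a
# learned reward model instead.
if __name__ == "__main__":
    from trl import AlignPropConfig, AlignPropTrainer, DefaultDDPOStableDiffusionPipeline

    def prompt_fn():
        # Return a (prompt, metadata) pair, as expected by the trainer.
        return "a photo of a cat", {}

    def reward_fn(images, prompts, prompt_metadata):
        # Return (reward, metadata); the mean pixel value is a dummy differentiable reward.
        return images.mean(dim=(1, 2, 3)), {}

    config = AlignPropConfig(num_epochs=1, train_batch_size=1)
    pipeline = DefaultDDPOStableDiffusionPipeline("runwayml/stable-diffusion-v1-5", use_lora=True)
    trainer = AlignPropTrainer(config, reward_fn, prompt_fn, pipeline)
    trainer.train()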
trl/trl/trainer/alignprop_trainer.py/0
{ "file_path": "trl/trl/trainer/alignprop_trainer.py", "repo_id": "trl", "token_count": 7640 }
473
from dataclasses import dataclass
from typing import Literal, Optional

from transformers import TrainingArguments


@dataclass
class OnlineDPOConfig(TrainingArguments):
    r"""
    Configuration class for the [`OnlineDPOTrainer`].

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.

    Args:
        reward_model_path (`Optional[str]`, *optional*, defaults to `None`):
            Path to the reward model.
        max_new_tokens (`int`, *optional*, defaults to `53`):
            The maximum number of tokens to generate per completion.
        temperature (`float`, *optional*, defaults to `0.9`):
            Temperature for sampling. The higher the temperature, the more random the completions.
        missing_eos_penalty (`Optional[float]`, *optional*, defaults to `None`):
            Penalty when the model fails to generate an EOS token.
        beta (`float`, *optional*, defaults to `0.1`):
            Beta parameter for the DPO loss.
        loss_type (`str`, *optional*, defaults to `"sigmoid"`):
            Type of DPO loss to use. Possible values are:

                - `"sigmoid"`: sigmoid loss from the original [DPO](https://huggingface.co/papers/2305.18290) paper.
                - `"ipo"`: IPO loss from the [IPO](https://huggingface.co/papers/2310.12036) paper.

        dataset_num_proc (`Optional[int]`, *optional*, defaults to `None`):
            Number of workers to use to process the data.
    """

    reward_model_path: Optional[str] = None
    max_new_tokens: int = 53
    temperature: float = 0.9
    missing_eos_penalty: Optional[float] = None
    beta: float = 0.1
    loss_type: Literal["sigmoid", "ipo"] = "sigmoid"
    dataset_num_proc: Optional[int] = None
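# --- Editor's usage sketch (not part of the original module) -------------------
# OnlineDPOConfig inherits every field of `transformers.TrainingArguments`, so the
# usual arguments (output_dir, batch size, ...) are passed alongside the online-DPO
# specific ones. Both paths below are placeholders, not real checkpoints.
if __name__ == "__main__":
    training_args = OnlineDPOConfig(
        output_dir="online-dpo-output",            # placeholder directory
        per_device_train_batch_size=2,
        reward_model_path="path/to/reward-model",  # placeholder reward-model checkpoint
        max_new_tokens=53,
        beta=0.1,
        loss_type="sigmoid",
    )
    print(training_args.max_new_tokens)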
trl/trl/trainer/online_dpo_config.py/0
{ "file_path": "trl/trl/trainer/online_dpo_config.py", "repo_id": "trl", "token_count": 713 }
474
# FP8 Benchmarks

Comparing and running [TransformerEngine](https://github.com/NVIDIA/TransformerEngine) FP8 with accelerate

## Overview

This repo provides scripts which compare native TransformerEngine model training against `accelerate`'s own integration. Each modeling type is segmented out via a script, supporting the following:

* Single GPU training (`non_distributed.py`)
* Multi-GPU training via DistributedDataParallelism (`ddp.py`)
* Fully Sharded Data Parallelism (`fsdp.py`)
* DeepSpeed ZeRO 1-3 (`deepspeed.py`)

To run them, it's recommended to use a docker image (see the attached `Dockerfile`) and not install `TransformerEngine` manually.

## Running:

There are official Docker images located at `huggingface/accelerate:gpu-fp8-transformerengine-nightly` which can be used.

You can run all scripts using the core `accelerate launch` command without any `accelerate config` being needed.

For single GPU, run it via `python`:

```bash
python non_distributed.py
```

For the rest, run it via `accelerate launch`:

```bash
accelerate launch ddp.py # or fsdp.py, deepspeed.py
```
accelerate/benchmarks/fp8/README.md/0
{ "file_path": "accelerate/benchmarks/fp8/README.md", "repo_id": "accelerate", "token_count": 326 }
0
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Add Accelerate to your code Each distributed training framework has their own way of doing things which can require writing a lot of custom code to adapt it to your PyTorch training code and training environment. Accelerate offers a friendly way to interface with these distributed training frameworks without having to learn the specific details of each one. Accelerate takes care of those details for you, so you can focus on the training code and scale it to any distributed training environment. In this tutorial, you'll learn how to adapt your existing PyTorch code with Accelerate and get you on your way toward training on distributed systems with ease! You'll start with a basic PyTorch training loop (it assumes all the training objects like `model` and `optimizer` have been setup already) and progressively integrate Accelerate into it. ```python device = "cuda" model.to(device) for batch in training_dataloader: optimizer.zero_grad() inputs, targets = batch inputs = inputs.to(device) targets = targets.to(device) outputs = model(inputs) loss = loss_function(outputs, targets) loss.backward() optimizer.step() scheduler.step() ``` ## Accelerator The [`Accelerator`] is the main class for adapting your code to work with Accelerate. It knows about the distributed setup you're using such as the number of different processes and your hardware type. This class also provides access to many of the necessary methods for enabling your PyTorch code to work in any distributed training environment and for managing and executing processes across devices. That's why you should always start by importing and creating an [`Accelerator`] instance in your script. ```python from accelerate import Accelerator accelerator = Accelerator() ``` The [`Accelerator`] also knows which device to move your PyTorch objects to, so it is recommended to let Accelerate handle this for you. ```diff - device = "cuda" + device = accelerator.device model.to(device) ``` ## Prepare PyTorch objects Next, you need to prepare your PyTorch objects (model, optimizer, scheduler, etc.) for distributed training. The [`~Accelerator.prepare`] method takes care of placing your model in the appropriate container (like single GPU or multi-GPU) for your training setup, adapting the optimizer and scheduler to use Accelerate's [`~optimizer.AcceleratedOptimizer`] and [`~scheduler.AcceleratedScheduler`], and creating a new dataloader that can be sharded across processes. > [!TIP] > Accelerate only prepares objects that inherit from their respective PyTorch classes such as `torch.optim.Optimizer`. The PyTorch objects are returned in the same order they're sent. 
```py model, optimizer, training_dataloader, scheduler = accelerator.prepare( model, optimizer, training_dataloader, scheduler ) ``` ## Training loop Finally, remove the `to(device)` calls to the inputs and targets in the training loop because Accelerate's DataLoader classes automatically places them on the right device. You should also replace the usual `backward()` pass with Accelerate's [`~Accelerator.backward`] method which scales the gradients for you and uses the appropriate `backward()` method depending on your distributed setup (for example, DeepSpeed or Megatron). ```diff - inputs = inputs.to(device) - targets = targets.to(device) outputs = model(inputs) loss = loss_function(outputs, targets) - loss.backward() + accelerator.backward(loss) ``` Put everything together and your new Accelerate training loop should now look like this! ```python from accelerate import Accelerator accelerator = Accelerator() device = accelerator.device model, optimizer, training_dataloader, scheduler = accelerator.prepare( model, optimizer, training_dataloader, scheduler ) for batch in training_dataloader: optimizer.zero_grad() inputs, targets = batch outputs = model(inputs) loss = loss_function(outputs, targets) accelerator.backward(loss) optimizer.step() scheduler.step() ``` ## Training features Accelerate offers additional features - like gradient accumulation, gradient clipping, mixed precision training and more - you can add to your script to improve your training run. Let's explore these three features. ### Gradient accumulation Gradient accumulation enables you to train on larger batch sizes by accumulating the gradients over multiple batches before updating the weights. This can be useful for getting around memory limitations. To enable this feature in Accelerate, specify the `gradient_accumulation_steps` parameter in the [`Accelerator`] class and add the [`~Accelerator.accumulate`] context manager to your script. ```diff + accelerator = Accelerator(gradient_accumulation_steps=2) model, optimizer, training_dataloader = accelerator.prepare(model, optimizer, training_dataloader) for input, label in training_dataloader: + with accelerator.accumulate(model): predictions = model(input) loss = loss_function(predictions, label) accelerator.backward(loss) optimizer.step() scheduler.step() optimizer.zero_grad() ``` ### Gradient clipping Gradient clipping is a technique to prevent "exploding gradients", and Accelerate offers: * [`~Accelerator.clip_grad_value_`] to clip gradients to a minimum and maximum value * [`~Accelerator.clip_grad_norm_`] for normalizing gradients to a certain value ### Mixed precision Mixed precision accelerates training by using a lower precision data type like fp16 (half-precision) to calculate the gradients. For the best performance with Accelerate, the loss should be computed inside your model (like in Transformers models) because computations outside of the model are computed in full precision. Set the mixed precision type to use in the [`Accelerator`], and then use the [`~Accelerator.autocast`] context manager to automatically cast the values to the specified data type. > [!WARNING] > Accelerate enables automatic mixed precision, so [`~Accelerator.autocast`] is only needed if there are other mixed precision operations besides those performed on loss by [`~Accelerator.backward`] which already handles the scaling. 
```diff + accelerator = Accelerator(mixed_precision="fp16") + with accelerator.autocast(): loss = complex_loss_function(outputs, target): ``` ## Save and load Accelerate can also save and load a *model* once training is complete or you can also save the model and optimizer *state* which could be useful for resuming training. ### Model Once all processes are complete, unwrap the model with the [`~Accelerator.unwrap_model`] method before saving it because the [`~Accelerator.prepare`] method wrapped your model into the proper interface for distributed training. If you don't unwrap the model, saving the model state dictionary also saves any potential extra layers from the larger model and you won't be able to load the weights back into your base model. You should use the [`~Accelerator.save_model`] method to unwrap and save the model state dictionary. This method can also save a model into sharded checkpoints or into the [safetensors](https://hf.co/docs/safetensors/index) format. <hfoptions id="save"> <hfoption id="single checkpoint"> ```py accelerator.wait_for_everyone() accelerator.save_model(model, save_directory) ``` <Tip> For models from the [Transformers](https://hf.co/docs/transformers/index) library, save the model with the [`~transformers.PreTrainedModel.save_pretrained`] method so that it can be reloaded with the [`~transformers.PreTrainedModel.from_pretrained`] method. ```py from transformers import AutoModel unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( "path/to/my_model_directory", is_main_process=accelerator.is_main_process, save_function=accelerator.save, ) model = AutoModel.from_pretrained("path/to/my_model_directory") ``` </Tip> To load your weights, use the [`~Accelerator.unwrap_model`] method to unwrap the model first before loading the weights. All model parameters are references to tensors, so this loads your weights inside `model`. ```py unwrapped_model = accelerator.unwrap_model(model) path_to_checkpoint = os.path.join(save_directory,"pytorch_model.bin") unwrapped_model.load_state_dict(torch.load(path_to_checkpoint)) ``` </hfoption> <hfoption id="sharded checkpoint"> Set `safe_serialization=True` to save the model in the safetensor format. ```py accelerator.wait_for_everyone() accelerator.save_model(model, save_directory, max_shard_size="1GB", safe_serialization=True) ``` To load a sharded checkpoint or a safetensor formatted checkpoint, use the [`~accelerate.load_checkpoint_in_model`] method. This method allows you to load a checkpoint onto a specific device. ```py load_checkpoint_in_model(unwrapped_model, save_directory, device_map={"":device}) ``` </hfoption> </hfoptions> ### State During training, you may want to save the current state of the model, optimizer, random generators, and potentially learning rate schedulers so they can be restored in the *same script*. You should add the [`~Accelerator.save_state`] and [`~Accelerator.load_state`] methods to your script to save and load states. To further customize where and how states are saved through [`~Accelerator.save_state`], use the [`~utils.ProjectConfiguration`] class. For example, if `automatic_checkpoint_naming` is enabled, each saved checkpoint is stored at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`. Any other stateful items to be stored should be registered with the [`~Accelerator.register_for_checkpointing`] method so they can be saved and loaded. 
Every object passed to this method to be stored must have a `load_state_dict` and `state_dict` function. <Note> If you have [`torchdata>=0.8.0`](https://github.com/pytorch/data/tree/main) installed, you can additionally pass `use_stateful_dataloader=True` into your [`~utils.DataLoaderConfiguration`]. This extends Accelerate's DataLoader classes with a `load_state_dict` and `state_dict` function, and makes it so `Accelerator.save_state` and `Accelerator.load_state` also track how far into the training dataset it has read when persisting the model. </Note>
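As a minimal sketch (the `StepCounter` class below is hypothetical, not part of Accelerate), any custom object exposing `state_dict` and `load_state_dict` can be registered so its value is captured by [`~Accelerator.save_state`] and restored by [`~Accelerator.load_state`]:

```py
class StepCounter:
    """Tiny custom object whose value should survive checkpointing."""

    def __init__(self):
        self.steps = 0

    def state_dict(self):
        return {"steps": self.steps}

    def load_state_dict(self, state_dict):
        self.steps = state_dict["steps"]


counter = StepCounter()
accelerator.register_for_checkpointing(counter)
accelerator.save_state()  # counter.steps is stored alongside the model and optimizer state
```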
accelerate/docs/source/basic_tutorials/migration.md/0
{ "file_path": "accelerate/docs/source/basic_tutorials/migration.md", "repo_id": "accelerate", "token_count": 3071 }
1
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Accelerate 🤗 Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration by adding just four lines of code! In short, training and inference at scale made simple, efficient and adaptable. ```diff + from accelerate import Accelerator + accelerator = Accelerator() + model, optimizer, training_dataloader, scheduler = accelerator.prepare( + model, optimizer, training_dataloader, scheduler + ) for batch in training_dataloader: optimizer.zero_grad() inputs, targets = batch inputs = inputs.to(device) targets = targets.to(device) outputs = model(inputs) loss = loss_function(outputs, targets) + accelerator.backward(loss) optimizer.step() scheduler.step() ``` Built on `torch_xla` and `torch.distributed`, 🤗 Accelerate takes care of the heavy lifting, so you don't have to write any custom code to adapt to these platforms. Convert existing codebases to utilize [DeepSpeed](usage_guides/deepspeed), perform [fully sharded data parallelism](usage_guides/fsdp), and have automatic support for mixed-precision training! <Tip> To get a better idea of this process, make sure to check out the [Tutorials](basic_tutorials/overview)! </Tip> This code can then be launched on any system through Accelerate's CLI interface: ```bash accelerate launch {my_script.py} ``` <div class="mt-10"> <div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5"> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./basic_tutorials/overview" ><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Tutorials</div> <p class="text-gray-700">Learn the basics and become familiar with using 🤗 Accelerate. Start here if you are using 🤗 Accelerate for the first time!</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./usage_guides/explore" ><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">How-to guides</div> <p class="text-gray-700">Practical guides to help you achieve a specific goal. 
Take a look at these guides to learn how to use 🤗 Accelerate to solve real-world problems.</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./concept_guides/gradient_synchronization" ><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Conceptual guides</div> <p class="text-gray-700">High-level explanations for building a better understanding of important topics such as avoiding subtle nuances and pitfalls in distributed training and DeepSpeed.</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./package_reference/accelerator" ><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Reference</div> <p class="text-gray-700">Technical descriptions of how 🤗 Accelerate classes and methods work.</p> </a> </div> </div>
accelerate/docs/source/index.md/0
{ "file_path": "accelerate/docs/source/index.md", "repo_id": "accelerate", "token_count": 1371 }
2
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Quicktour There are many ways to launch and run your code depending on your training environment ([torchrun](https://pytorch.org/docs/stable/elastic/run.html), [DeepSpeed](https://www.deepspeed.ai/), etc.) and available hardware. Accelerate offers a unified interface for launching and training on different distributed setups, allowing you to focus on your PyTorch training code instead of the intricacies of adapting your code to these different setups. This allows you to easily scale your PyTorch code for training and inference on distributed setups with hardware like GPUs and TPUs. Accelerate also provides Big Model Inference to make loading and running inference with really large models that usually don't fit in memory more accessible. This quicktour introduces the three main features of Accelerate: * a unified command line launching interface for distributed training scripts * a training library for adapting PyTorch training code to run on different distributed setups * Big Model Inference ## Unified launch interface Accelerate automatically selects the appropriate configuration values for any given distributed training framework (DeepSpeed, FSDP, etc.) through a unified configuration file generated from the [`accelerate config`](package_reference/cli#accelerate-config) command. You could also pass the configuration values explicitly to the command line which is helpful in certain situations like if you're using SLURM. But in most cases, you should always run [`accelerate config`](package_reference/cli#accelerate-config) first to help Accelerate learn about your training setup. ```bash accelerate config ``` The [`accelerate config`](package_reference/cli#accelerate-config) command creates and saves a default_config.yaml file in Accelerates cache folder. This file stores the configuration for your training environment, which helps Accelerate correctly launch your training script based on your machine. After you've configured your environment, you can test your setup with [`accelerate test`](package_reference/cli#accelerate-test), which launches a short script to test the distributed environment. ```bash accelerate test ``` > [!TIP] > Add `--config_file` to the `accelerate test` or `accelerate launch` command to specify the location of the configuration file if it is saved in a non-default location like the cache. Once your environment is setup, launch your training script with [`accelerate launch`](package_reference/cli#accelerate-launch)! ```bash accelerate launch path_to_script.py --args_for_the_script ``` To learn more, check out the [Launch distributed code](basic_tutorials/launch) tutorial for more information about launching your scripts. 
We also have a [configuration zoo](https://github.com/huggingface/accelerate/blob/main/examples/config_yaml_templates) which showcases a number of premade **minimal** example configurations for a variety of setups you can run. ## Adapt training code The next main feature of Accelerate is the [`Accelerator`] class which adapts your PyTorch code to run on different distributed setups. You only need to add a few lines of code to your training script to enable it to run on multiple GPUs or TPUs. ```diff + from accelerate import Accelerator + accelerator = Accelerator() + device = accelerator.device + model, optimizer, training_dataloader, scheduler = accelerator.prepare( + model, optimizer, training_dataloader, scheduler + ) for batch in training_dataloader: optimizer.zero_grad() inputs, targets = batch - inputs = inputs.to(device) - targets = targets.to(device) outputs = model(inputs) loss = loss_function(outputs, targets) + accelerator.backward(loss) optimizer.step() scheduler.step() ``` 1. Import and instantiate the [`Accelerator`] class at the beginning of your training script. The [`Accelerator`] class initializes everything necessary for distributed training, and it automatically detects your training environment (a single machine with a GPU, a machine with several GPUs, several machines with multiple GPUs or a TPU, etc.) based on how the code was launched. ```python from accelerate import Accelerator accelerator = Accelerator() ``` 2. Remove calls like `.cuda()` on your model and input data. The [`Accelerator`] class automatically places these objects on the appropriate device for you. > [!WARNING] > This step is *optional* but it is considered best practice to allow Accelerate to handle device placement. You could also deactivate automatic device placement by passing `device_placement=False` when initializing the [`Accelerator`]. If you want to explicitly place objects on a device with `.to(device)`, make sure you use `accelerator.device` instead. For example, if you create an optimizer before placing a model on `accelerator.device`, training fails on a TPU. > [!WARNING] > Accelerate does not use non-blocking transfers by default for its automatic device placement, which can result in potentially unwanted CUDA synchronizations. You can enable non-blocking transfers by passing a [`~utils.dataclasses.DataLoaderConfiguration`] with `non_blocking=True` set as the `dataloader_config` when initializing the [`Accelerator`]. As usual, non-blocking transfers will only work if the dataloader also has `pin_memory=True` set. Be wary that using non-blocking transfers from GPU to CPU may cause incorrect results if it results in CPU operations being performed on non-ready tensors. ```py device = accelerator.device ``` 3. Pass all relevant PyTorch objects for training (optimizer, model, dataloader(s), learning rate scheduler) to the [`~Accelerator.prepare`] method as soon as they're created. This method wraps the model in a container optimized for your distributed setup, uses Accelerates version of the optimizer and scheduler, and creates a sharded version of your dataloader for distribution across GPUs or TPUs. ```python model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, lr_scheduler ) ``` 4. Replace `loss.backward()` with [`~Accelerator.backward`] to use the correct `backward()` method for your training setup. 
```py
accelerator.backward(loss)
```

Read [Accelerate’s internal mechanisms](concept_guides/internal_mechanism) guide to learn more details about how Accelerate adapts your code.

### Distributed evaluation

To perform distributed evaluation, pass your validation dataloader to the [`~Accelerator.prepare`] method:

```python
validation_dataloader = accelerator.prepare(validation_dataloader)
```

Each device in your distributed setup only receives a part of the evaluation data, which means you should group your predictions together with the [`~Accelerator.gather_for_metrics`] method. This method requires all tensors to be the same size on each process, so if your tensors have different sizes on each process (for instance when dynamically padding to the maximum length in a batch), you should use the [`~Accelerator.pad_across_processes`] method to pad your tensors to the largest size across processes. Note that the tensors need to be 1D and that we concatenate the tensors along the first dimension.

```python
for inputs, targets in validation_dataloader:
    predictions = model(inputs)
    # Gather all predictions and targets
    all_predictions, all_targets = accelerator.gather_for_metrics((predictions, targets))
    # Example of use with a *Datasets.Metric*
    metric.add_batch(all_predictions, all_targets)
```

For more complex cases (e.g. 2D tensors, don't want to concatenate tensors, dict of 3D tensors), you can pass `use_gather_object=True` in `gather_for_metrics`. This will return the list of objects after gathering. Note that using it with GPU tensors is not well supported and inefficient.

> [!TIP]
> Data at the end of a dataset may be duplicated so the batch can be equally divided among all workers. The [`~Accelerator.gather_for_metrics`] method automatically removes the duplicated data to calculate a more accurate metric.

## Big Model Inference

Accelerate's Big Model Inference has two main features, [`~accelerate.init_empty_weights`] and [`~accelerate.load_checkpoint_and_dispatch`], to load large models for inference that typically don't fit into memory.

> [!TIP]
> Take a look at the [Handling big models for inference](concept_guides/big_model_inference) guide for a better understanding of how Big Model Inference works under the hood.

### Empty weights initialization

The [`~accelerate.init_empty_weights`] context manager initializes models of any size by creating a *model skeleton* and moving and placing parameters each time they're created to PyTorch's [**meta**](https://pytorch.org/docs/main/meta.html) device. This way, not all weights are immediately loaded and only a small part of the model is loaded into memory at a time.

For example, loading an empty [Mixtral-8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) model takes significantly less memory than fully loading the models and weights on the CPU.

```py
from accelerate import init_empty_weights
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
with init_empty_weights():
    model = AutoModelForCausalLM.from_config(config)
```

### Load and dispatch weights

The [`~accelerate.load_checkpoint_and_dispatch`] function loads full or sharded checkpoints into the empty model, and automatically distributes weights across all available devices.

The `device_map` parameter determines where to place each model layer, and specifying `"auto"` places them on the GPU first, then the CPU, and finally the hard drive as memory-mapped tensors if there's still not enough memory.
Use the `no_split_module_classes` parameter to indicate which modules shouldn't be split across devices (typically those with a residual connection). ```py from accelerate import load_checkpoint_and_dispatch model = load_checkpoint_and_dispatch( model, checkpoint="mistralai/Mixtral-8x7B-Instruct-v0.1", device_map="auto", no_split_module_classes=['Block'] ) ``` ## Next steps Now that you've been introduced to the main Accelerate features, your next steps could include: * Check out the [tutorials](basic_tutorials/overview) for a gentle walkthrough of Accelerate. This is especially useful if you're new to distributed training and the library. * Dive into the [guides](usage_guides/explore) to see how to use Accelerate for specific use-cases. * Deepen your conceptual understanding of how Accelerate works internally by reading the [concept guides](concept_guides/internal_mechanism). * Look up classes and commands in the [API reference](package_reference/accelerator) to see what parameters and options are available.
accelerate/docs/source/quicktour.md/0
{ "file_path": "accelerate/docs/source/quicktour.md", "repo_id": "accelerate", "token_count": 3047 }
3
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Quantization ## `bitsandbytes` Integration 🤗 Accelerate brings `bitsandbytes` quantization to your model. You can now load any pytorch model in 8-bit or 4-bit with a few lines of code. If you want to use 🤗 Transformers models with `bitsandbytes`, you should follow this [documentation](https://huggingface.co/docs/transformers/main_classes/quantization). To learn more about how the `bitsandbytes` quantization works, check out the blog posts on [8-bit quantization](https://huggingface.co/blog/hf-bitsandbytes-integration) and [4-bit quantization](https://huggingface.co/blog/4bit-transformers-bitsandbytes). ### Pre-Requisites You will need to install the following requirements: - Install `bitsandbytes` library ```bash pip install bitsandbytes ``` - Install latest `accelerate` from source ```bash pip install git+https://github.com/huggingface/accelerate.git ``` - Install `minGPT` and `huggingface_hub` to run examples ```bash git clone https://github.com/karpathy/minGPT.git pip install minGPT/ pip install huggingface_hub ``` ### How it works First, we need to initialize our model. To save memory, we can initialize an empty model using the context manager [`init_empty_weights`]. Let's take the GPT2 model from minGPT library. ```py from accelerate import init_empty_weights from mingpt.model import GPT model_config = GPT.get_default_config() model_config.model_type = 'gpt2-xl' model_config.vocab_size = 50257 model_config.block_size = 1024 with init_empty_weights(): empty_model = GPT(model_config) ``` Then, we need to get the path to the weights of your model. The path can be the state_dict file (e.g. "pytorch_model.bin") or a folder containing the sharded checkpoints. ```py from huggingface_hub import snapshot_download weights_location = snapshot_download(repo_id="marcsun13/gpt2-xl-linear-sharded") ``` Finally, you need to set your quantization configuration with [`~utils.BnbQuantizationConfig`]. Here's an example for 8-bit quantization: ```py from accelerate.utils import BnbQuantizationConfig bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold = 6) ``` Here's an example for 4-bit quantization: ```py from accelerate.utils import BnbQuantizationConfig bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4") ``` To quantize your empty model with the selected configuration, you need to use [`~utils.load_and_quantize_model`]. 
```py
from accelerate.utils import load_and_quantize_model

quantized_model = load_and_quantize_model(empty_model, weights_location=weights_location, bnb_quantization_config=bnb_quantization_config, device_map = "auto")
```

### Saving and loading 8-bit model

You can save your 8-bit model with accelerate using [`~Accelerator.save_model`].

```py
from accelerate import Accelerator

accelerate = Accelerator()
new_weights_location = "path/to/save_directory"
accelerate.save_model(quantized_model, new_weights_location)

quantized_model_from_saved = load_and_quantize_model(empty_model, weights_location=new_weights_location, bnb_quantization_config=bnb_quantization_config, device_map = "auto")
```

Note that 4-bit model serialization is currently not supported.

### Offload modules to cpu and disk

You can offload some modules to cpu/disk if you don't have enough space on the GPU to store the entire model on your GPUs. This uses big model inference under the hood. Check this [documentation](https://huggingface.co/docs/accelerate/usage_guides/big_modeling) for more details.

For 8-bit quantization, the selected modules will be converted to 8-bit precision.

For 4-bit quantization, the selected modules will be kept in the `torch_dtype` that the user passed in `BnbQuantizationConfig`. We will add support to convert these offloaded modules in 4-bit when 4-bit serialization becomes possible.

You just need to pass a custom `device_map` in order to offload modules on cpu/disk. The offloaded modules will be dispatched on the GPU when needed. Here's an example:

```py
device_map = {
    "transformer.wte": 0,
    "transformer.wpe": 0,
    "transformer.drop": 0,
    "transformer.h": "cpu",
    "transformer.ln_f": "disk",
    "lm_head": "disk",
}
```

### Fine-tune a quantized model

It is not possible to perform pure 8bit or 4bit training on these models. However, you can train these models by leveraging parameter efficient fine tuning methods (PEFT) and train, for example, adapters on top of them. Please have a look at the [peft](https://github.com/huggingface/peft) library for more details.

Currently, you can't add adapters on top of any quantized model. However, with the official support of adapters with 🤗 Transformers models, you can fine-tune quantized models. If you want to finetune a 🤗 Transformers model, follow this [documentation](https://huggingface.co/docs/transformers/main_classes/quantization) instead. Check out this [demo](https://colab.research.google.com/drive/1VoYNfYDKcKRQRor98Zbf2-9VQTtGJ24k?usp=sharing) on how to fine-tune a 4-bit 🤗 Transformers model.

Note that you don’t need to pass `device_map` when loading the model for training. It will automatically load your model on your GPU. Please note that `device_map=auto` should be used for inference only.

### Example demo - running GPT2 1.5b on a Google Colab

Check out the Google Colab [demo](https://colab.research.google.com/drive/1T1pOgewAWVpR9gKpaEWw4orOrzPFb3yM?usp=sharing) for running quantized models on a GPT2 model. The GPT2-1.5B model checkpoint is in FP32 which uses 6GB of memory. After quantization, it uses 1.6GB with 8-bit modules and 1.2GB with 4-bit modules.
accelerate/docs/source/usage_guides/quantization.md/0
{ "file_path": "accelerate/docs/source/usage_guides/quantization.md", "repo_id": "accelerate", "token_count": 1962 }
4
{ "fp16": { "enabled": true, "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "weight_decay": "auto", "torch_adam": true, "adam_w_mode": true } }, "scheduler": { "type": "WarmupDecayLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto", "total_num_steps": "auto" } }, "zero_optimization": { "stage": 1, "allgather_partitions": true, "allgather_bucket_size": 2e8, "overlap_comm": true, "reduce_scatter": true, "reduce_bucket_size": "auto", "contiguous_gradients": true }, "gradient_accumulation_steps": 1, "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false }
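A brief usage note (illustrative sketch only, not part of the template): besides selecting this file through `accelerate config`, a JSON template like the one above can be passed programmatically through a `DeepSpeedPlugin`; the exact handling of the `"auto"` entries depends on your Accelerate version and the objects passed to `prepare`. The config path below is a placeholder, and DeepSpeed must be installed for this to run.

from accelerate import Accelerator
from accelerate.utils import DeepSpeedPlugin

# Placeholder path; point it at the ZeRO stage-1 JSON template above.
deepspeed_plugin = DeepSpeedPlugin(hf_ds_config="examples/deepspeed_config_templates/zero_stage1_config.json")
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)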
accelerate/examples/deepspeed_config_templates/zero_stage1_config.json/0
{ "file_path": "accelerate/examples/deepspeed_config_templates/zero_stage1_config.json", "repo_id": "accelerate", "token_count": 614 }
5
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from manim import * class Stage5(Scene): def construct(self): # The dataset items colors = ["BLUE_E", "DARK_BROWN", "GOLD_E", "GRAY_A"] fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0) columns = [ VGroup(*[Rectangle(height=0.25,width=0.25,color=colors[j]) for i in range(8)]).arrange(RIGHT,buff=0) for j in range(4) ] dataset_recs = VGroup(*columns).arrange(UP, buff=0) dataset_text = Text("Dataset", font_size=24) dataset = Group(dataset_recs,dataset_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) dataset.move_to([-2,0,0]) self.add(dataset) code = Code( code="# We enable this by default\naccelerator = Accelerator()\ndataloader = DataLoader(...)\ndataloader = accelerator.prepare(dataloader)\nfor batch in dataloader:\n\t...", tab_width=4, background="window", language="Python", font="Monospace", font_size=14, corner_radius=.2, insert_line_no=False, line_spacing=.75, style=Code.styles_list[1], ) code.move_to([-3.5, 2.5, 0]) self.add(code) # The dataloader itself sampler_1 = Group( Rectangle(color="blue", height=1, width=1), Text("Sampler GPU 1", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN) sampler_2 = Group( Rectangle(color="blue", height=1, width=1), Text("Sampler GPU 2", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN) sampler_3 = Group( Rectangle(color="blue", height=1, width=1), Text("Sampler GPU 3", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN) sampler_4 = Group( Rectangle(color="blue", height=1, width=1), Text("Sampler GPU 4", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN) sampler_1.move_to([2,2,0]) sampler_2.move_to([2,.5,0]) sampler_3.move_to([2,-1.,0]) sampler_4.move_to([2,-2.5,0]) self.add(sampler_1, sampler_2, sampler_3, sampler_4) samplers = [sampler_1[0], sampler_2[0], sampler_3[0], sampler_4[0]] gpu_1 = Group( Rectangle(color="white", height=1, width=1), Text("Output GPU 1", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, 2, 0]) gpu_2 = Group( Rectangle(color="white", height=1, width=1), Text("Output GPU 2", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, .5, 0]) gpu_3 = Group( Rectangle(color="white", height=1, width=1), Text("Output GPU 3", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, -1, 0]) gpu_4 = Group( Rectangle(color="white", height=1, width=1), Text("Output GPU 4", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, -2.5, 0]) gpus = [gpu_1[0], gpu_2[0], gpu_3[0], gpu_4[0]] self.add(gpu_1, gpu_2, gpu_3, gpu_4) # Animate their existence self.play( Create(gpu_1[0], run_time=1), Create(gpu_2[0], run_time=1), Create(gpu_3[0], run_time=1), Create(gpu_4[0], run_time=1), Create(dataset_recs, run_time=1), Create(sampler_1[0], run_time=1), Create(sampler_2[0], run_time=1), Create(sampler_3[0], run_time=1), Create(sampler_4[0], run_time=1), ) first_animations = [] second_animations = [] colors = ["BLUE_E", "DARK_BROWN", "GOLD_E", 
"GRAY_A"] current_color = colors[0] buff = 0 lr_buff = .25 old_target = None new_datasets = [] for i,row_data in enumerate(dataset_recs): new_row = [] current_color = colors[i] if i == 0: idx = -3 elif i == 1: idx = -2 elif i == 2: idx = -1 elif i == 3: idx = 0 for j,indiv_data in enumerate(row_data): dataset_target = Rectangle(height=0.46/2,width=0.46/2).set_stroke(width=0.).set_fill(current_color, opacity=0.7) dataset_target.move_to(indiv_data) dataset_target.generate_target() aligned_edge = ORIGIN if j % 8 == 0: aligned_edge = LEFT dataset_target.target.next_to( samplers[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN, ) dataset_target.target.set_x(dataset_target.target.get_x()) elif j % 4 == 0: old_target = dataset_target.target dataset_target.target.next_to( samplers[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN, ) dataset_target.target.set_x(dataset_target.target.get_x()) dataset_target.target.set_y(dataset_target.target.get_y()-.25) else: dataset_target.target.next_to( old_target, direction=RIGHT, buff=0.02, ) old_target = dataset_target.target new_row.append(dataset_target) first_animations.append(indiv_data.animate(run_time=0.5).set_stroke(current_color)) second_animations.append(MoveToTarget(dataset_target, run_time=1.5)) new_datasets.append(new_row) step_1 = MarkupText( f"Since we splice the dataset between each GPU,\nthe models weights can be averaged during `backward()`\nActing as though we did one giant epoch\nvery quickly.", font_size=18 ) step_1.move_to([-2.5, -2, 0]) self.play( Write(step_1, run_time=3), ) self.play( *first_animations, ) self.play(*second_animations) self.wait(duration=.5) move_animation = [] import random for i,row in enumerate(new_datasets): # row = [row[k] for k in random.sample(range(8), 8)] current_color = colors[i] if i == 0: idx = -3 elif i == 1: idx = -2 elif i == 2: idx = -1 elif i == 3: idx = 0 for j,indiv_data in enumerate(row): indiv_data.generate_target() aligned_edge = ORIGIN if j % 8 == 0: aligned_edge = LEFT indiv_data.target.next_to( gpus[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN, ) indiv_data.target.set_x(indiv_data.target.get_x()) elif j % 4 == 0: indiv_data.target.next_to( gpus[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN, ) indiv_data.target.set_x(indiv_data.target.get_x()) indiv_data.target.set_y(indiv_data.target.get_y()-.25) else: indiv_data.target.next_to( old_target, direction=RIGHT, buff=0.02, ) old_target = indiv_data.target move_animation.append(MoveToTarget(indiv_data, run_time=1.5)) self.play(*move_animation) self.wait()
accelerate/manim_animations/dataloaders/stage_5.py/0
{ "file_path": "accelerate/manim_animations/dataloaders/stage_5.py", "repo_id": "accelerate", "token_count": 4515 }
6
#!/usr/bin/env python # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_mlu_available, is_musa_available, is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter description = "Create a default config file for Accelerate with only a few flags set." def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False): """ Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also set CPU if it is a CPU-only machine. Args: mixed_precision (`str`, *optional*, defaults to "no"): Mixed Precision to use. Should be one of "no", "fp16", or "bf16" save_location (`str`, *optional*, defaults to `default_json_config_file`): Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`. Default location is inside the huggingface cache folder (`~/.cache/huggingface`) but can be overriden by setting the `HF_HOME` environmental variable, followed by `accelerate/default_config.yaml`. use_xpu (`bool`, *optional*, defaults to `False`): Whether to use XPU if available. """ path = Path(save_location) path.parent.mkdir(parents=True, exist_ok=True) if path.exists(): print( f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`." ) return False mixed_precision = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. 
Received {mixed_precision}" ) config = { "compute_environment": "LOCAL_MACHINE", "mixed_precision": mixed_precision, } if is_mlu_available(): num_mlus = torch.mlu.device_count() config["num_processes"] = num_mlus config["use_cpu"] = False if num_mlus > 1: config["distributed_type"] = "MULTI_MLU" else: config["distributed_type"] = "NO" elif is_musa_available(): num_musas = torch.musa.device_count() config["num_processes"] = num_musas config["use_cpu"] = False if num_musas > 1: config["distributed_type"] = "MULTI_MUSA" else: config["distributed_type"] = "NO" elif torch.cuda.is_available(): num_gpus = torch.cuda.device_count() config["num_processes"] = num_gpus config["use_cpu"] = False if num_gpus > 1: config["distributed_type"] = "MULTI_GPU" else: config["distributed_type"] = "NO" elif is_xpu_available() and use_xpu: num_xpus = torch.xpu.device_count() config["num_processes"] = num_xpus config["use_cpu"] = False if num_xpus > 1: config["distributed_type"] = "MULTI_XPU" else: config["distributed_type"] = "NO" elif is_npu_available(): num_npus = torch.npu.device_count() config["num_processes"] = num_npus config["use_cpu"] = False if num_npus > 1: config["distributed_type"] = "MULTI_NPU" else: config["distributed_type"] = "NO" else: num_xpus = 0 config["use_cpu"] = True config["num_processes"] = 1 config["distributed_type"] = "NO" config["debug"] = False config["enable_cpu_affinity"] = False config = ClusterConfig(**config) config.to_json_file(path) return path def default_command_parser(parser, parents): parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter) parser.add_argument( "--config_file", default=default_json_config_file, help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ), dest="save_location", ) parser.add_argument( "--mixed_precision", choices=["no", "fp16", "bf16"], type=str, help="Whether or not to use mixed precision training. " "Choose between FP16 and BF16 (bfloat16) training. " "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.", default="no", ) parser.set_defaults(func=default_config_command) return parser def default_config_command(args): config_file = write_basic_config(args.mixed_precision, args.save_location) if config_file: print(f"accelerate configuration saved at {config_file}")
accelerate/src/accelerate/commands/config/default.py/0
{ "file_path": "accelerate/src/accelerate/commands/config/default.py", "repo_id": "accelerate", "token_count": 2280 }
7
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from contextlib import suppress from typing import Callable, List, Optional, Union import torch from torch.utils.data import BatchSampler, DataLoader, IterableDataset, RandomSampler from .logging import get_logger from .state import DistributedType, GradientState, PartialState, is_torch_xla_available from .utils import ( RNGType, broadcast, broadcast_object_list, concatenate, find_batch_size, get_data_structure, initialize_tensors, is_torch_version, is_torchdata_stateful_dataloader_available, send_to_device, slice_tensors, synchronize_rng_states, ) logger = get_logger(__name__) # kwargs of the DataLoader in min version 1.4.0. _PYTORCH_DATALOADER_KWARGS = { "batch_size": 1, "shuffle": False, "sampler": None, "batch_sampler": None, "num_workers": 0, "collate_fn": None, "pin_memory": False, "drop_last": False, "timeout": 0, "worker_init_fn": None, "multiprocessing_context": None, "generator": None, "prefetch_factor": 2, "persistent_workers": False, } # kwargs added after by version _PYTORCH_DATALOADER_ADDITIONAL_KWARGS = {} for v, additional_kwargs in _PYTORCH_DATALOADER_ADDITIONAL_KWARGS.items(): if is_torch_version(">=", v): _PYTORCH_DATALOADER_KWARGS.update(additional_kwargs) class SeedableRandomSampler(RandomSampler): """ Same as a random sampler, except that in `__iter__` a seed can be used. Needed specifically in distributed cases, when the random generator for each GPU needs to start from the same seed and be fully reproducable on multiple iterations. If a custom `generator` is passed, it will rely on its initial seed as well as the current iteration it is on (stored in `self.epoch`). """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.epoch = 0 self.initial_seed = torch.random.initial_seed() def __iter__(self): if self.generator is None: self.generator = torch.Generator() self.generator.manual_seed(self.initial_seed) # Allow `self.epoch` to modify the seed of the generator seed = self.epoch + self.initial_seed # print("Setting seed at epoch", self.epoch, seed) self.generator.manual_seed(seed) yield from super().__iter__() self.set_epoch(self.epoch + 1) def set_epoch(self, epoch: int): "Sets the current iteration of the sampler." self.epoch = epoch class BatchSamplerShard(BatchSampler): """ Wraps a PyTorch `BatchSampler` to generate batches for one of the processes only. Instances of this class will always yield a number of batches that is a round multiple of `num_processes` and that all have the same size. Depending on the value of the `drop_last` attribute of the batch sampler passed, it will either stop the iteration at the first batch that would be too small / not present on all processes or loop with indices from the beginning. Args: batch_sampler (`torch.utils.data.sampler.BatchSampler`): The batch sampler to split in several shards. num_processes (`int`, *optional*, defaults to 1): The number of processes running concurrently. 
process_index (`int`, *optional*, defaults to 0): The index of the current process. split_batches (`bool`, *optional*, defaults to `False`): Whether the shards should be created by splitting a batch to give a piece of it on each process, or by yielding different full batches on each process. On two processes with a sampler of `[[0, 1, 2, 3], [4, 5, 6, 7]]`, this will result in: - the sampler on process 0 to yield `[0, 1, 2, 3]` and the sampler on process 1 to yield `[4, 5, 6, 7]` if this argument is set to `False`. - the sampler on process 0 to yield `[0, 1]` then `[4, 5]` and the sampler on process 1 to yield `[2, 3]` then `[6, 7]` if this argument is set to `True`. even_batches (`bool`, *optional*, defaults to `True`): Whether or not to loop back at the beginning of the sampler when the number of samples is not a round multiple of (original batch size / number of processes). <Tip warning={true}> `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches` equal to `False` </Tip>""" def __init__( self, batch_sampler: BatchSampler, num_processes: int = 1, process_index: int = 0, split_batches: bool = False, even_batches: bool = True, ): if split_batches and batch_sampler.batch_size % num_processes != 0: raise ValueError( f"To use `BatchSamplerShard` in `split_batches` mode, the batch size ({batch_sampler.batch_size}) " f"needs to be a round multiple of the number of processes ({num_processes})." ) self.batch_sampler = batch_sampler self.num_processes = num_processes self.process_index = process_index self.split_batches = split_batches self.even_batches = even_batches self.batch_size = getattr(batch_sampler, "batch_size", None) self.drop_last = getattr(batch_sampler, "drop_last", False) if self.batch_size is None and self.even_batches: raise ValueError( "You need to use `even_batches=False` when the batch sampler has no batch size. If you " "are not calling this method directly, set `accelerator.even_batches=False` instead." ) @property def total_length(self): return len(self.batch_sampler) def __len__(self): if self.split_batches: # Split batches does not change the length of the batch sampler return len(self.batch_sampler) if len(self.batch_sampler) % self.num_processes == 0: # If the length is a round multiple of the number of processes, it's easy. return len(self.batch_sampler) // self.num_processes length = len(self.batch_sampler) // self.num_processes if self.drop_last: # Same if we drop the remainder. return length elif self.even_batches: # When we even batches we always get +1 return length + 1 else: # Otherwise it depends on the process index. return length + 1 if self.process_index < len(self.batch_sampler) % self.num_processes else length def __iter__(self): return self._iter_with_split() if self.split_batches else self._iter_with_no_split() def _iter_with_split(self): initial_data = [] batch_length = self.batch_sampler.batch_size // self.num_processes for idx, batch in enumerate(self.batch_sampler): if idx == 0: initial_data = batch if len(batch) == self.batch_size: # If the batch is full, we yield the part of it this process is responsible of. yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)] # If drop_last is True of the last batch was full, iteration is over, otherwise... 
if not self.drop_last and len(initial_data) > 0 and len(batch) < self.batch_size: if not self.even_batches: if len(batch) > batch_length * self.process_index: yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)] else: # For degenerate cases where the dataset has less than num_process * batch_size samples while len(initial_data) < self.batch_size: initial_data += initial_data batch = batch + initial_data yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)] def _iter_with_no_split(self): initial_data = [] batch_to_yield = [] for idx, batch in enumerate(self.batch_sampler): # We gather the initial indices in case we need to circle back at the end. if not self.drop_last and idx < self.num_processes: initial_data += batch # We identify the batch to yield but wait until we ar sure every process gets a full batch before actually # yielding it. if idx % self.num_processes == self.process_index: batch_to_yield = batch if idx % self.num_processes == self.num_processes - 1 and ( self.batch_size is None or len(batch) == self.batch_size ): yield batch_to_yield batch_to_yield = [] # If drop_last is True, iteration is over, otherwise... if not self.drop_last and len(initial_data) > 0: if not self.even_batches: if len(batch_to_yield) > 0: yield batch_to_yield else: # ... we yield the complete batch we had saved before if it has the proper length if len(batch_to_yield) == self.batch_size: yield batch_to_yield # For degenerate cases where the dataset has less than num_process * batch_size samples while len(initial_data) < self.num_processes * self.batch_size: initial_data += initial_data # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next if len(batch) == self.batch_size: batch = [] idx += 1 # Make sure we yield a multiple of self.num_processes batches cycle_index = 0 while idx % self.num_processes != 0 or len(batch) > 0: end_index = cycle_index + self.batch_size - len(batch) batch += initial_data[cycle_index:end_index] if idx % self.num_processes == self.process_index: yield batch cycle_index = end_index batch = [] idx += 1 class IterableDatasetShard(IterableDataset): """ Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will always yield a number of samples that is a round multiple of the actual batch size (depending of the value of `split_batches`, this is either `batch_size` or `batch_size x num_processes`). Depending on the value of the `drop_last` attribute of the batch sampler passed, it will either stop the iteration at the first batch that would be too small or loop with indices from the beginning. Args: dataset (`torch.utils.data.dataset.IterableDataset`): The batch sampler to split in several shards. batch_size (`int`, *optional*, defaults to 1): The size of the batches per shard (if `split_batches=False`) or the size of the batches (if `split_batches=True`). drop_last (`bool`, *optional*, defaults to `False`): Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the beginning. num_processes (`int`, *optional*, defaults to 1): The number of processes running concurrently. process_index (`int`, *optional*, defaults to 0): The index of the current process. split_batches (`bool`, *optional*, defaults to `False`): Whether the shards should be created by splitting a batch to give a piece of it on each process, or by yielding different full batches on each process. 
On two processes with an iterable dataset yielding of `[0, 1, 2, 3, 4, 5, 6, 7]`, this will result in: - the shard on process 0 to yield `[0, 1, 2, 3]` and the shard on process 1 to yield `[4, 5, 6, 7]` if this argument is set to `False`. - the shard on process 0 to yield `[0, 1, 4, 5]` and the sampler on process 1 to yield `[2, 3, 6, 7]` if this argument is set to `True`. """ def __init__( self, dataset: IterableDataset, batch_size: int = 1, drop_last: bool = False, num_processes: int = 1, process_index: int = 0, split_batches: bool = False, ): if split_batches and batch_size > 1 and batch_size % num_processes != 0: raise ValueError( f"To use `IterableDatasetShard` in `split_batches` mode, the batch size ({batch_size}) " f"needs to be a round multiple of the number of processes ({num_processes})." ) self.dataset = dataset self.batch_size = batch_size self.drop_last = drop_last self.num_processes = num_processes self.process_index = process_index self.split_batches = split_batches def set_epoch(self, epoch): self.epoch = epoch if hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(epoch) def __len__(self): # We will just raise the downstream error if the underlying dataset is not sized if self.drop_last: return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size else: return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size def __iter__(self): if ( not hasattr(self.dataset, "set_epoch") and hasattr(self.dataset, "generator") and isinstance(self.dataset.generator, torch.Generator) ): self.dataset.generator.manual_seed(self.epoch) real_batch_size = self.batch_size if self.split_batches else (self.batch_size * self.num_processes) process_batch_size = (self.batch_size // self.num_processes) if self.split_batches else self.batch_size process_slice = range(self.process_index * process_batch_size, (self.process_index + 1) * process_batch_size) first_batch = None current_batch = [] for element in self.dataset: current_batch.append(element) # Wait to have a full batch before yielding elements. if len(current_batch) == real_batch_size: for i in process_slice: yield current_batch[i] if first_batch is None: first_batch = current_batch.copy() current_batch = [] # Finished if drop_last is True, otherwise complete the last batch with elements from the beginning. if not self.drop_last and len(current_batch) > 0: if first_batch is None: first_batch = current_batch.copy() while len(current_batch) < real_batch_size: current_batch += first_batch for i in process_slice: yield current_batch[i] class DataLoaderStateMixin: """ Mixin class that adds a state to a `DataLoader` to keep track of the status inside the dataloader such as at the end of the iteration, the number of items in the dataset in the last batch relative to the batch size, and other useful information that might be needed. **Available attributes:** - **end_of_dataloader** (`bool`) -- Whether at the last iteration or batch - **remainder** (`int`) -- The number of items that are remaining in the last batch, relative to the total batch size <Tip warning={true}> Inheriters of this class should ensure that the class creates a `GradientState()` instance, stored in `self.gradient_state`. 
</Tip> """ def __init_subclass__(cls, **kwargs): cls.end_of_dataloader = False cls.remainder = -1 def reset(self): self.end_of_dataloader = False self.remainder = -1 def begin(self): "Prepares the gradient state for the current dataloader" self.reset() with suppress(Exception): if not self._drop_last: length = getattr(self.dataset, "total_dataset_length", len(self.dataset)) self.remainder = length % self.total_batch_size self.gradient_state._add_dataloader(self) def end(self): "Cleans up the gradient state after exiting the dataloader" self.gradient_state._remove_dataloader(self) class DataLoaderAdapter: """ A class which wraps around a PyTorch `DataLoader` (or variants of it) to be used with the `Accelerator`. For compatability reasons, this class inherits from the class it wraps around, so it can be used as a drop-in. """ def __init__(self, dataset, use_stateful_dataloader=False, batch_sampler=None, **kwargs): self.use_stateful_dataloader = use_stateful_dataloader if is_torchdata_stateful_dataloader_available(): from torchdata.stateful_dataloader import StatefulDataLoader if use_stateful_dataloader and not is_torchdata_stateful_dataloader_available(): raise ImportError( "StatefulDataLoader is not available. Please install torchdata version 0.8.0 or higher to use it." ) if use_stateful_dataloader: self.base_dataloader = StatefulDataLoader(dataset, batch_sampler=batch_sampler, **kwargs) else: self.base_dataloader = DataLoader(dataset, batch_sampler=batch_sampler, **kwargs) # Dynamically mixin the parent class. See https://stackoverflow.com/a/31075641 # In C++ terms, this is analogous to creating `DataLoaderAdapter<T> : T`, where T is a DataLoader or # StatefulDataLoader # # The same functionality could be achieved by directly creating the required subclasses for both {DataLoader, # StatefulDataLoader}, however that could lead to much messier code, with duplicated classes and conditional # dispatching scattered throughout various functions and files. # # This code is incredibly awkward but it's the only way to make `isinstance(obj, StatefulDataLoader)` work # transparently. # # A more robust solution is for DataLoaderAdapter to not inherit from DataLoader (compose rather than inherit), # but this would not be backwards compatible with existing code which assumes # DataLoaderShard/DataLoaderDispatcher are DataLoaders. base_cls = self.__class__ base_cls_name = self.__class__.__name__ parent_cls_name = self.base_dataloader.__class__ self.__class__ = type(base_cls_name, (base_cls, parent_cls_name), {}) if hasattr(self.base_dataloader, "state_dict"): self.dl_state_dict = self.base_dataloader.state_dict() def __getattr__(self, name): # Avoid infinite recursion if we try to access a nonexistent base_dataloader attribute. if name == "base_dataloader": raise AttributeError() # Delegate attribute access to the internal dataloader return getattr(self.base_dataloader, name) def state_dict(self): return self.dl_state_dict def load_state_dict(self, state_dict): self.base_dataloader.load_state_dict(state_dict) def adjust_state_dict_for_prefetch(self): """ Adjusts the state dict for prefetching. Natively, this will adjust all of the iters yielded keys in `self.dl_state_dict` by a factor of `num_processes - 1`, however if a custom correction is needed, this can be overridden. 
This should modify `self.dl_state_dict` directly """ # The state dict will be off by a factor of `n-1` batch too many during DDP, # so we need to adjust it here if PartialState().distributed_type != DistributedType.NO: factor = PartialState().num_processes - 1 if self.dl_state_dict["_sampler_iter_yielded"] > 0: self.dl_state_dict["_sampler_iter_yielded"] -= factor if self.dl_state_dict["_num_yielded"] > 0: self.dl_state_dict["_num_yielded"] -= factor if self.dl_state_dict["_index_sampler_state"] is not None: if ( "samples_yielded" in self.dl_state_dict["_index_sampler_state"] and self.dl_state_dict["_index_sampler_state"]["samples_yielded"] > 0 ): self.dl_state_dict["_index_sampler_state"]["samples_yielded"] -= self.batch_size * factor def _update_state_dict(self): # The state_dict of the underlying base_dataloader may be ahead of what is currently being yielded. # E.g. the implementation of DataLoaderShard involves having an underlying iterator 1 element ahead of # what it wants to yield. # # _update_state_dict is called to snapshot the state_dict that would properly recover the DataLoaderAdapter. if hasattr(self.base_dataloader, "state_dict"): self.dl_state_dict = self.base_dataloader.state_dict() # Potentially modify the state_dict to adjust for prefetching self.adjust_state_dict_for_prefetch() # Then tag if we are at the end of the dataloader self.dl_state_dict["_iterator_finished"] = self.end_of_dataloader class DataLoaderShard(DataLoaderAdapter, DataLoaderStateMixin): """ Subclass of `DataLoaderAdapter` that will deal with device placement and current distributed setup. Args: dataset (`torch.utils.data.dataset.Dataset`): The dataset to use to build this datalaoder. device (`torch.device`, *optional*): If passed, the device to put all batches on. rng_types (list of `str` or [`~utils.RNGType`]): The list of random number generators to synchronize at the beginning of each iteration. Should be one or several of: - `"torch"`: the base torch random number generator - `"cuda"`: the CUDA random number generator (GPU only) - `"xla"`: the XLA random number generator (TPU only) - `"generator"`: an optional `torch.Generator` synchronized_generator (`torch.Generator`, *optional*): A random number generator to keep synchronized across processes. skip_batches (`int`, *optional*, defaults to 0): The number of batches to skip at the beginning. use_stateful_dataloader (`bool`, *optional*, defaults to `False`): Whether to have this class adapt `StatefulDataLoader` from `torchdata` instead of the regular `DataLoader`. **kwargs (additional keyword arguments, *optional*): All other keyword arguments to pass to the regular `DataLoader` initialization. **Available attributes:** - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes. Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total number of processes - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes. 
""" def __init__( self, dataset, device=None, rng_types=None, synchronized_generator=None, skip_batches=0, use_stateful_dataloader=False, _drop_last: bool = False, _non_blocking: bool = False, **kwargs, ): super().__init__(dataset, use_stateful_dataloader=use_stateful_dataloader, **kwargs) self.device = device self.rng_types = rng_types self.synchronized_generator = synchronized_generator self.skip_batches = skip_batches self.gradient_state = GradientState() self._drop_last = _drop_last self._non_blocking = _non_blocking self.iteration = 0 def __iter__(self): if self.rng_types is not None: synchronize_rng_states(self.rng_types, self.synchronized_generator) self.begin() self.set_epoch(self.iteration) dataloader_iter = self.base_dataloader.__iter__() # We iterate one batch ahead to check when we are at the end try: current_batch = next(dataloader_iter) except StopIteration: yield batch_index = 0 while True: try: # But we still move it to the device so it is done before `StopIteration` is reached if self.device is not None: current_batch = send_to_device(current_batch, self.device, non_blocking=self._non_blocking) self._update_state_dict() next_batch = next(dataloader_iter) if batch_index >= self.skip_batches: yield current_batch batch_index += 1 current_batch = next_batch except StopIteration: self.end_of_dataloader = True self._update_state_dict() if batch_index >= self.skip_batches: yield current_batch break self.iteration += 1 self.end() def set_epoch(self, epoch: int): # In case it is manually passed in, the user can set it to what they like if self.iteration != epoch: self.iteration = epoch if hasattr(self.batch_sampler, "sampler") and hasattr(self.batch_sampler.sampler, "set_epoch"): self.batch_sampler.sampler.set_epoch(epoch) # We support if a custom `Dataset` implementation has `set_epoch` # or in general HF datasets `Datasets` elif hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(epoch) @property def total_batch_size(self): batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler return ( batch_sampler.batch_size if getattr(batch_sampler, "split_batches", False) else (batch_sampler.batch_size * getattr(batch_sampler, "num_processes", 1)) ) @property def total_dataset_length(self): if hasattr(self.dataset, "total_length"): return self.dataset.total_length else: return len(self.dataset) def get_sampler(self): return get_sampler(self) def set_sampler(self, sampler): sampler_is_batch_sampler = isinstance(self.sampler, BatchSampler) if sampler_is_batch_sampler: self.sampler.sampler = sampler else: self.batch_sampler.sampler = sampler if hasattr(self.batch_sampler, "batch_sampler"): self.batch_sampler.batch_sampler.sampler = sampler if is_torch_xla_available(): import torch_xla.distributed.parallel_loader as xpl class MpDeviceLoaderWrapper(xpl.MpDeviceLoader): """ Wrapper for the xpl.MpDeviceLoader class that knows the total batch size. XLA preloading threads will all call DataLoaderShard's __iter__(). Remove rng_types from DataLoaderShard to prevent it from using the XLA device in the preloading threads, and synchronize the RNG once from the main thread only. **Available attributes:** - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes. Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total number of processes - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes. 
""" def __init__(self, dataloader: DataLoaderShard, device: torch.device): super().__init__(dataloader, device) self._rng_types = self._loader.rng_types self._loader.rng_types = None self.device = device def __iter__(self): if self._rng_types is not None: synchronize_rng_states(self._rng_types, self._loader.synchronized_generator) return super().__iter__() @property def total_batch_size(self): return self._loader.total_batch_size @property def total_dataset_length(self): return self._loader.total_dataset_length @property def batch_sampler(self): return self._loader.batch_sampler @property def dataloader(self): return self._loader class DataLoaderDispatcher(DataLoaderAdapter, DataLoaderStateMixin): """ Subclass of `DataLoaderAdapter` that will iterate and preprocess on process 0 only, then dispatch on each process their part of the batch. Args: split_batches (`bool`, *optional*, defaults to `False`): Whether the resulting `DataLoader` should split the batches of the original data loader across devices or yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing of `num_processes` batches at each iteration). Another way to see this is that the observed batch size will be the same as the initial `dataloader` if this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes` otherwise. Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of `batch_size`. skip_batches (`int`, *optional*, defaults to 0): The number of batches to skip at the beginning of an iteration. use_stateful_dataloader (`bool`, *optional*, defaults to `False`): Whether to have this class adapt `StatefulDataLoader` from `torchdata` instead of the regular `DataLoader`. **Available attributes:** - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes. Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total number of processes - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes. """ def __init__( self, dataset, split_batches: bool = False, skip_batches=0, use_stateful_dataloader=False, _drop_last: bool = False, _non_blocking: bool = False, slice_fn=None, **kwargs, ): shuffle = False if is_torch_version(">=", "1.11.0"): from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe # We need to save the shuffling state of the DataPipe if isinstance(dataset, ShufflerIterDataPipe): shuffle = dataset._shuffle_enabled super().__init__(dataset, use_stateful_dataloader=use_stateful_dataloader, **kwargs) self.split_batches = split_batches if shuffle: torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle) self.gradient_state = GradientState() self.state = PartialState() self._drop_last = _drop_last self._non_blocking = _non_blocking self.skip_batches = skip_batches self.slice_fn = slice_tensors if slice_fn is None else slice_fn self.iteration = 0 def _fetch_batches(self, iterator): batches, batch = None, None # On process 0, we gather the batch to dispatch. if self.state.process_index == 0: try: if self.split_batches: # One batch of the main iterator is dispatched and split. self._update_state_dict() batch = next(iterator) else: # num_processes batches of the main iterator are concatenated then dispatched and split. # We add the batches one by one so we have the remainder available when drop_last=False. 
batches = [] for _ in range(self.state.num_processes): self._update_state_dict() batches.append(next(iterator)) try: batch = concatenate(batches, dim=0) except RuntimeError as e: raise RuntimeError( "You can't use batches of different size with `dispatch_batches=True` or when using an `IterableDataset`." "either pass `dispatch_batches=False` and have each process fetch its own batch " " or pass `split_batches=True`. By doing so, the main process will fetch a full batch and " "slice it into `num_processes` batches for each process." ) from e # In both cases, we need to get the structure of the batch that we will broadcast on other # processes to initialize the tensors with the right shape. # data_structure, stop_iteration batch_info = [get_data_structure(batch), False] except StopIteration: batch_info = [None, True] else: batch_info = [None, self._stop_iteration] # This is inplace, so after this instruction, every process has the same `batch_info` as process 0. broadcast_object_list(batch_info) self._stop_iteration = batch_info[1] if self._stop_iteration: # If drop_last is False and split_batches is False, we may have a remainder to take care of. if not self.split_batches and not self._drop_last: if self.state.process_index == 0 and len(batches) > 0: batch = concatenate(batches, dim=0) batch_info = [get_data_structure(batch), False] else: batch_info = [None, True] broadcast_object_list(batch_info) return batch, batch_info def __iter__(self): self.begin() self.set_epoch(self.iteration) main_iterator = None if is_torch_version(">=", "2.0.1"): # NOTE PyTorch DataLoader adds forward compatibilities for DataPipes, which broadcasts # shared seed to all dist processes. Thus, we need to create iterator for all dist processes. # But, we only iterate through the DataLoader on process 0. main_iterator = self.base_dataloader.__iter__() elif self.state.process_index == 0: main_iterator = self.base_dataloader.__iter__() stop_iteration = False self._stop_iteration = False first_batch = None next_batch, next_batch_info = self._fetch_batches(main_iterator) batch_index = 0 while not stop_iteration: batch, batch_info = next_batch, next_batch_info if self.state.process_index != 0: # Initialize tensors on other processes than process 0. batch = initialize_tensors(batch_info[0]) batch = send_to_device(batch, self.state.device, non_blocking=self._non_blocking) # Broadcast the batch before splitting it. batch = broadcast(batch, from_process=0) if not self._drop_last and first_batch is None: # We keep at least num processes elements of the first batch to be able to complete the last batch first_batch = self.slice_fn( batch, slice(0, self.state.num_processes), process_index=self.state.process_index, num_processes=self.state.num_processes, ) if batch is None: raise ValueError( f"Batch does not contain any data (`{batch}`). At the end of all iterable data available before expected stop iteration." ) observed_batch_size = find_batch_size(batch) batch_size = observed_batch_size // self.state.num_processes stop_iteration = self._stop_iteration if not stop_iteration: # We may still be at the end of the dataloader without knowing it yet: if there is nothing left in # the dataloader since the number of batches is a round multiple of the number of processes. next_batch, next_batch_info = self._fetch_batches(main_iterator) # next_batch_info[0] is None when there are no more batches, otherwise we still need to process them. 
if self._stop_iteration and next_batch_info[0] is None: stop_iteration = True if not self._drop_last and stop_iteration and observed_batch_size % self.state.num_processes != 0: # If the last batch is not complete, let's add the first batch to it. batch = concatenate([batch, first_batch], dim=0) # Batch size computation above is wrong, it's off by 1 so we fix it. batch_size += 1 data_slice = slice(self.state.process_index * batch_size, (self.state.process_index + 1) * batch_size) batch = self.slice_fn( batch, data_slice, process_index=self.state.process_index, num_processes=self.state.num_processes, ) if stop_iteration: self.end_of_dataloader = True self._update_state_dict() self.remainder = observed_batch_size if batch_index >= self.skip_batches: yield batch batch_index += 1 self.iteration += 1 self.end() def set_epoch(self, epoch: int): # In case it is manually passed in, the user can set it to what they like if self.iteration != epoch: self.iteration = epoch if hasattr(self.batch_sampler, "sampler") and hasattr(self.batch_sampler.sampler, "set_epoch"): self.batch_sampler.sampler.set_epoch(epoch) elif hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(epoch) def __len__(self): whole_length = super().__len__() if self.split_batches: return whole_length elif self._drop_last: return whole_length // self.state.num_processes else: return math.ceil(whole_length / self.state.num_processes) @property def total_batch_size(self): return ( self.dataset.batch_size if self.split_batches else (self.dataset.batch_size * self.dataset.num_processes) ) @property def total_dataset_length(self): return len(self.dataset) def get_sampler(self): return get_sampler(self) def set_sampler(self, sampler): sampler_is_batch_sampler = isinstance(self.sampler, BatchSampler) if sampler_is_batch_sampler: self.sampler.sampler = sampler else: self.batch_sampler.sampler = sampler if hasattr(self.batch_sampler, "batch_sampler"): self.batch_sampler.batch_sampler.sampler = sampler def get_sampler(dataloader): """ Get the sampler associated to the dataloader Args: dataloader (`torch.utils.data.dataloader.DataLoader`): The data loader to split across several devices. Returns: `torch.utils.data.Sampler`: The sampler associated to the dataloader """ sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler) if sampler_is_batch_sampler: sampler = getattr(dataloader.sampler, "sampler", None) else: sampler = getattr(dataloader.batch_sampler, "sampler", None) return sampler def prepare_data_loader( dataloader: DataLoader, device: Optional[torch.device] = None, num_processes: Optional[int] = None, process_index: Optional[int] = None, split_batches: bool = False, put_on_device: bool = False, rng_types: Optional[List[Union[str, RNGType]]] = None, dispatch_batches: Optional[bool] = None, even_batches: bool = True, slice_fn_for_dispatch: Optional[Callable] = None, use_seedable_sampler: bool = False, non_blocking: bool = False, use_stateful_dataloader: bool = False, ) -> DataLoader: """ Wraps a PyTorch `DataLoader` to generate batches for one of the processes only. Depending on the value of the `drop_last` attribute of the `dataloader` passed, it will either stop the iteration at the first batch that would be too small / not present on all processes or loop with indices from the beginning. Args: dataloader (`torch.utils.data.dataloader.DataLoader`): The data loader to split across several devices. device (`torch.device`): The target device for the returned `DataLoader`. 
num_processes (`int`, *optional*): The number of processes running concurrently. Will default to the value given by [`~state.PartialState`]. process_index (`int`, *optional*): The index of the current process. Will default to the value given by [`~state.PartialState`]. split_batches (`bool`, *optional*, defaults to `False`): Whether the resulting `DataLoader` should split the batches of the original data loader across devices or yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing of `num_processes` batches at each iteration). Another way to see this is that the observed batch size will be the same as the initial `dataloader` if this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes` otherwise. Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of `batch_size`. put_on_device (`bool`, *optional*, defaults to `False`): Whether or not to put the batches on `device` (only works if the batches are nested list, tuples or dictionaries of tensors). rng_types (list of `str` or [`~utils.RNGType`]): The list of random number generators to synchronize at the beginning of each iteration. Should be one or several of: - `"torch"`: the base torch random number generator - `"cuda"`: the CUDA random number generator (GPU only) - `"xla"`: the XLA random number generator (TPU only) - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type. dispatch_batches (`bool`, *optional*): If set to `True`, the datalaoder prepared is only iterated through on the main process and then the batches are split and broadcast to each process. Will default to `True` when the underlying dataset is an `IterableDataset`, `False` otherwise. even_batches (`bool`, *optional*, defaults to `True`): If set to `True`, in cases where the total batch size across all processes does not exactly divide the dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among all workers. slice_fn_for_dispatch (`Callable`, *optional*`): If passed, this function will be used to slice tensors across `num_processes`. Will default to [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will be ignored otherwise. use_seedable_sampler (`bool`, *optional*, defaults to `False`): Whether to use the [`~data_loader.SeedableRandomSampler`] instead of a `RandomSampler` for better reproducability. Comes at a cost of potentially different performances due to different shuffling algorithms but ensures results will be the *exact* same. Should be paired with `set_seed()` at every `self.set_epoch` non_blocking (`bool`, *optional*, defaults to `False`): If set to `True`, dataloader will utilize non-blocking host-to-device transfers. If the dataloader has `pin_memory` set to `True`, this will help to increase overlap between data transfer and computations. use_stateful_dataloader (`bool`, *optional*, defaults to `False`): "If set to true, the dataloader prepared by the Accelerator will be backed by " "[torchdata.StatefulDataLoader](https://github.com/pytorch/data/tree/main/torchdata/stateful_dataloader). This requires `torchdata` version 0.8.0 or higher that supports StatefulDataLoader to be installed." 
Returns: `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches <Tip warning={true}> `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches` equal to `False` </Tip> """ if dispatch_batches is None: if not put_on_device: dispatch_batches = False else: dispatch_batches = isinstance(dataloader.dataset, IterableDataset) if dispatch_batches and not put_on_device: raise ValueError("Using `dispatch_batches=True` requires `put_on_device=True`.") # Grab defaults from PartialState state = PartialState() if num_processes is None: num_processes = state.num_processes if process_index is None: process_index = state.process_index # Sanity check if split_batches: if dataloader.batch_size is not None: batch_size_for_check = dataloader.batch_size else: # For custom batch_sampler if hasattr(dataloader.batch_sampler, "batch_size"): batch_size_for_check = dataloader.batch_sampler.batch_size else: raise ValueError( "In order to use `split_batches==True` you must have a `batch_size` attribute either in the passed " "`dataloader` or `dataloader.batch_sampler` objects, and it has to return a natural number. " "Your `dataloader.batch_size` is None and `dataloader.batch_sampler` " f"(`{type(dataloader.batch_sampler)}`) does not have the `batch_size` attribute set." ) if batch_size_for_check > 1 and batch_size_for_check % num_processes != 0: raise ValueError( f"To use a `DataLoader` in `split_batches` mode, the batch size ({dataloader.batch_size}) " f"needs to be a round multiple of the number of processes ({num_processes})." ) new_dataset = dataloader.dataset # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler) synchronized_generator = None sampler = get_sampler(dataloader) if isinstance(sampler, RandomSampler) and use_seedable_sampler: # When iterating through the dataloader during distributed processes # we want to ensure that on each process we are iterating through the same # samples in the same order if a seed is set. This requires a tweak # to the `torch.utils.data.RandomSampler` class (if used). sampler = SeedableRandomSampler( data_source=sampler.data_source, replacement=sampler.replacement, num_samples=sampler._num_samples, generator=getattr(sampler, "generator", torch.Generator()), ) if isinstance(dataloader.sampler, RandomSampler) and state.distributed_type == DistributedType.XLA: # isinstance(dataloader.sampler, RandomSampler) indicates the original dataloader has `shuffle` enabled. 
generator = torch.Generator().manual_seed(42) dataloader.generator = generator dataloader.sampler.generator = generator # No change if no multiprocess if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and not dispatch_batches: if isinstance(new_dataset, IterableDataset): if getattr(dataloader.dataset, "generator", None) is not None: synchronized_generator = dataloader.dataset.generator new_dataset = IterableDatasetShard( new_dataset, batch_size=dataloader.batch_size, drop_last=dataloader.drop_last, num_processes=num_processes, process_index=process_index, split_batches=split_batches, ) else: if not use_seedable_sampler and hasattr(sampler, "generator"): if sampler.generator is None: sampler.generator = torch.Generator() synchronized_generator = sampler.generator batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler new_batch_sampler = BatchSamplerShard( batch_sampler, num_processes=num_processes, process_index=process_index, split_batches=split_batches, even_batches=even_batches, ) # We ignore all of those since they are all dealt with by our new_batch_sampler ignore_kwargs = [ "batch_size", "shuffle", "sampler", "batch_sampler", "drop_last", ] if rng_types is not None and synchronized_generator is None and "generator" in rng_types: rng_types.remove("generator") kwargs = { k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) for k in _PYTORCH_DATALOADER_KWARGS if k not in ignore_kwargs } # Need to provide batch_size as batch_sampler is None for Iterable dataset if new_batch_sampler is None: kwargs["drop_last"] = dataloader.drop_last kwargs["batch_size"] = ( dataloader.batch_size // num_processes if split_batches and not dispatch_batches else dataloader.batch_size ) if dispatch_batches: kwargs.pop("generator") dataloader = DataLoaderDispatcher( new_dataset, split_batches=split_batches, batch_sampler=new_batch_sampler, _drop_last=dataloader.drop_last, _non_blocking=non_blocking, slice_fn=slice_fn_for_dispatch, use_stateful_dataloader=use_stateful_dataloader, **kwargs, ) elif sampler_is_batch_sampler: dataloader = DataLoaderShard( new_dataset, device=device if put_on_device and state.distributed_type != DistributedType.XLA else None, sampler=new_batch_sampler, batch_size=dataloader.batch_size, rng_types=rng_types, _drop_last=dataloader.drop_last, _non_blocking=non_blocking, synchronized_generator=synchronized_generator, use_stateful_dataloader=use_stateful_dataloader, **kwargs, ) else: dataloader = DataLoaderShard( new_dataset, device=device if put_on_device and state.distributed_type != DistributedType.XLA else None, batch_sampler=new_batch_sampler, rng_types=rng_types, synchronized_generator=synchronized_generator, _drop_last=dataloader.drop_last, _non_blocking=non_blocking, use_stateful_dataloader=use_stateful_dataloader, **kwargs, ) if isinstance(sampler, SeedableRandomSampler) and use_seedable_sampler: dataloader.set_sampler(sampler) if state.distributed_type == DistributedType.XLA: return MpDeviceLoaderWrapper(dataloader, device) return dataloader class SkipBatchSampler(BatchSampler): """ A `torch.utils.data.BatchSampler` that skips the first `n` batches of another `torch.utils.data.BatchSampler`. 
""" def __init__(self, batch_sampler, skip_batches=0): self.batch_sampler = batch_sampler self.sampler = batch_sampler.sampler self.skip_batches = skip_batches def __iter__(self): for index, samples in enumerate(self.batch_sampler): if index >= self.skip_batches: yield samples @property def total_length(self): return len(self.batch_sampler) def __len__(self): return len(self.batch_sampler) - self.skip_batches class SkipDataLoader(DataLoaderAdapter, DataLoaderStateMixin): """ Subclass of a PyTorch `DataLoader` that will skip the first batches. Args: dataset (`torch.utils.data.dataset.Dataset`): The dataset to use to build this datalaoder. skip_batches (`int`, *optional*, defaults to 0): The number of batches to skip at the beginning. use_stateful_dataloader (`bool`, *optional*, defaults to `False`): Whether to have this class adapt `StatefulDataLoader` from `torchdata` instead of the regular `DataLoader`. kwargs: All other keyword arguments to pass to the regular `DataLoader` initialization. """ def __init__(self, dataset, skip_batches=0, use_stateful_dataloader=False, **kwargs): super().__init__(dataset, use_stateful_dataloader=use_stateful_dataloader, **kwargs) self.skip_batches = skip_batches self.gradient_state = GradientState() def __iter__(self): self.begin() for index, batch in enumerate(self.base_dataloader.__iter__()): if index >= self.skip_batches: self._update_state_dict() yield batch self.end() def skip_first_batches(dataloader, num_batches=0): """ Creates a `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`. """ if is_torchdata_stateful_dataloader_available(): from torchdata.stateful_dataloader import StatefulDataLoader state = PartialState() if state.distributed_type == DistributedType.XLA: device = dataloader.device dataloader = dataloader.dataloader dataset = dataloader.dataset sampler_is_batch_sampler = False if isinstance(dataset, IterableDataset): new_batch_sampler = None else: sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler) batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler new_batch_sampler = SkipBatchSampler(batch_sampler, skip_batches=num_batches) # We ignore all of those since they are all dealt with by our new_batch_sampler ignore_kwargs = [ "batch_size", "shuffle", "sampler", "batch_sampler", "drop_last", ] kwargs = { k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) for k in _PYTORCH_DATALOADER_KWARGS if k not in ignore_kwargs } # Need to provide batch_size as batch_sampler is None for Iterable dataset if new_batch_sampler is None: kwargs["drop_last"] = dataloader.drop_last kwargs["batch_size"] = dataloader.batch_size if isinstance(dataloader, DataLoaderDispatcher): if new_batch_sampler is None: # Need to manually skip batches in the dataloader kwargs["skip_batches"] = num_batches dataloader = DataLoaderDispatcher( dataset, split_batches=dataloader.split_batches, batch_sampler=new_batch_sampler, _drop_last=dataloader._drop_last, use_stateful_dataloader=dataloader.use_stateful_dataloader, **kwargs, ) elif isinstance(dataloader, DataLoaderShard): if new_batch_sampler is None: # Need to manually skip batches in the dataloader kwargs["skip_batches"] = num_batches elif sampler_is_batch_sampler: kwargs["sampler"] = new_batch_sampler kwargs["batch_size"] = dataloader.batch_size else: kwargs["batch_sampler"] = new_batch_sampler dataloader = DataLoaderShard( dataset, device=dataloader.device, rng_types=dataloader.rng_types, 
synchronized_generator=dataloader.synchronized_generator, use_stateful_dataloader=dataloader.use_stateful_dataloader, **kwargs, ) else: if new_batch_sampler is None: # Need to manually skip batches in the dataloader dataloader = SkipDataLoader( dataset, skip_batches=num_batches, use_stateful_dataloader=dataloader.use_stateful_dataloader, **kwargs ) elif is_torchdata_stateful_dataloader_available() and isinstance(dataloader, StatefulDataLoader): dataloader = StatefulDataLoader(dataset, batch_sampler=new_batch_sampler, **kwargs) else: dataloader = DataLoader(dataset, batch_sampler=new_batch_sampler, **kwargs) if state.distributed_type == DistributedType.XLA: dataloader = MpDeviceLoaderWrapper(dataloader, device) return dataloader
accelerate/src/accelerate/data_loader.py/0
{ "file_path": "accelerate/src/accelerate/data_loader.py", "repo_id": "accelerate", "token_count": 24827 }
8
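A minimal usage sketch for the `skip_first_batches` utility defined in the `data_loader.py` file above, showing how a plain PyTorch `DataLoader` can be resumed mid-epoch. The dataset, batch size and the number of already-completed steps are made up for illustration; only the `skip_first_batches(dataloader, num_batches=...)` call itself comes from the source.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate.data_loader import skip_first_batches

# Toy dataset: 64 samples of 10 features each (illustrative values only).
dataset = TensorDataset(torch.randn(64, 10), torch.randint(0, 2, (64,)))
dataloader = DataLoader(dataset, batch_size=8, shuffle=False)

# Suppose a previous run stopped after 3 steps in this epoch: the returned
# dataloader skips the first 3 batches and yields the remaining ones.
resumed_dataloader = skip_first_batches(dataloader, num_batches=3)

for step, (inputs, labels) in enumerate(resumed_dataloader, start=3):
    print(step, inputs.shape)
```

For map-style datasets this works by wrapping the batch sampler (`SkipBatchSampler` above), so the skipped batches are never materialized; for iterable datasets the batches are read and discarded instead.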
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import is_mlu_available, is_musa_available, is_npu_available, is_xpu_available from accelerate.utils.deepspeed import DummyOptim, DummyScheduler MAX_GPU_BATCH_SIZE = 16 EVAL_BATCH_SIZE = 32 # Converting Bytes to Megabytes def b2mb(x): return int(x / 2**20) # This context manager is used to track the peak memory usage of the process class TorchTracemalloc: def __enter__(self): gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero self.begin = torch.cuda.memory_allocated() elif is_mlu_available(): torch.mlu.empty_cache() torch.mlu.reset_max_memory_allocated() # reset the peak gauge to zero self.begin = torch.mlu.memory_allocated() elif is_musa_available(): torch.musa.empty_cache() torch.musa.reset_max_memory_allocated() # reset the peak gauge to zero self.begin = torch.musa.memory_allocated() elif is_npu_available(): torch.npu.empty_cache() torch.npu.reset_max_memory_allocated() # reset the peak gauge to zero self.begin = torch.npu.memory_allocated() elif is_xpu_available(): torch.xpu.empty_cache() torch.xpu.reset_max_memory_allocated() # reset the peak gauge to zero self.begin = torch.xpu.memory_allocated() return self def __exit__(self, *exc): gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() self.end = torch.cuda.memory_allocated() self.peak = torch.cuda.max_memory_allocated() elif is_mlu_available(): torch.mlu.empty_cache() torch.mlu.memory_allocated() # reset the peak gauge to zero self.begin = torch.mlu.max_memory_allocated() elif is_musa_available(): torch.musa.empty_cache() torch.musa.memory_allocated() # reset the peak gauge to zero self.begin = torch.musa.max_memory_allocated() elif is_npu_available(): torch.npu.empty_cache() self.end = torch.npu.memory_allocated() self.peak = torch.npu.max_memory_allocated() elif is_xpu_available(): torch.xpu.empty_cache() self.end = torch.xpu.memory_allocated() self.peak = torch.xpu.max_memory_allocated() self.used = b2mb(self.end - self.begin) self.peaked = b2mb(self.peak - self.begin) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def get_dataloaders( accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased", n_train: int = 320, n_val: int = 160, ): """ Creates a set of `DataLoader`s for the `glue` dataset. Args: accelerator (`Accelerator`): An `Accelerator` object batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders. model_name (`str`, *optional*): The name of the model to use. 
n_train (`int`, *optional*): The number of training examples to use. n_val (`int`, *optional*): The number of validation examples to use. """ tokenizer = AutoTokenizer.from_pretrained(model_name) datasets = load_dataset( "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"} ) def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.XLA: return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") return tokenizer.pad(examples, padding="longest", return_tensors="pt") # Instantiate dataloaders. train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) return train_dataloader, eval_dataloader def training_function(config, args): # Initialize accelerator accelerator = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) model_name = args.model_name_or_path set_seed(seed) train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True) # Instantiate optimizer optimizer_cls = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) optimizer = optimizer_cls(params=model.parameters(), lr=lr) if accelerator.state.deepspeed_plugin is not None: gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: gradient_accumulation_steps = 1 max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, ) else: lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # We need to keep track of how many total steps we have iterated over overall_step = 0 # We also need to keep track of the stating epoch so files are named properly starting_epoch = 0 # Now we train the model train_total_peak_memory = {} for epoch in range(starting_epoch, num_epochs): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(train_dataloader): outputs = model(**batch) loss = outputs.loss loss = loss / gradient_accumulation_steps accelerator.backward(loss) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print(f"Memory before entering the train : {b2mb(tracemalloc.begin)}") accelerator.print(f"Memory consumed at the end of the train (end-begin): {tracemalloc.used}") accelerator.print(f"Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}") accelerator.print( f"Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}" ) train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f: json.dump(train_total_peak_memory, f) accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.") parser.add_argument( "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, ) parser.add_argument( "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", ) parser.add_argument( "--peak_memory_upper_bound", type=float, default=None, help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.", ) parser.add_argument( "--n_train", type=int, default=320, help="Number of training examples to use.", ) parser.add_argument( "--n_val", type=int, default=160, help="Number of validation examples to use.", ) parser.add_argument( "--num_epochs", type=int, default=1, help="Number of train epochs.", ) args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(config, args) if __name__ == "__main__": main()
accelerate/src/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py", "repo_id": "accelerate", "token_count": 4676 }
9
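The `TorchTracemalloc` helper in the script above brackets an epoch and reports how much accelerator memory it consumed. Below is a stripped-down, CUDA-only sketch of the same pattern, meant to illustrate the idea rather than reproduce the script's multi-backend helper; the model and batch are dummies.

```python
import gc

import torch


class CudaPeakMemory:
    """Minimal CUDA-only variant of the TorchTracemalloc pattern shown above."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        # Same bytes-to-megabytes conversion as b2mb (x / 2**20).
        self.used_mb = (self.end - self.begin) >> 20
        self.peaked_mb = (self.peak - self.begin) >> 20


if torch.cuda.is_available():
    model = torch.nn.Linear(1024, 1024).cuda()
    with CudaPeakMemory() as tracker:
        loss = model(torch.randn(64, 1024, device="cuda")).sum()
        loss.backward()
    print(f"used: {tracker.used_mb} MB, peak: {tracker.peaked_mb} MB")
```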
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from copy import deepcopy from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_4bit_bnb_available, is_8bit_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) logger = logging.getLogger(__name__) def load_and_quantize_model( model: torch.nn.Module, bnb_quantization_config: BnbQuantizationConfig, weights_location: Union[str, os.PathLike] = None, device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None, no_split_module_classes: Optional[List[str]] = None, max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, offload_folder: Optional[Union[str, os.PathLike]] = None, offload_state_dict: bool = False, ): """ This function will quantize the input model with the associated config passed in `bnb_quantization_config`. If the model is in the meta device, we will load and dispatch the weights according to the `device_map` passed. If the model is already loaded, we will quantize the model and put the model on the GPU, Args: model (`torch.nn.Module`): Input model. The model can be already loaded or on the meta device bnb_quantization_config (`BnbQuantizationConfig`): The bitsandbytes quantization parameters weights_location (`str` or `os.PathLike`): The folder weights_location to load. It can be: - a path to a file containing a whole model state dict - a path to a `.json` file containing the index to a sharded checkpoint - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. - a path to a folder containing a unique pytorch_model.bin file. device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. no_split_module_classes (`List[str]`, *optional*): A list of layer class names that should never be split across device (for instance any layer that has a residual connection). max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset. offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_state_dict (`bool`, *optional*, defaults to `False`): If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard does not fit. 
Returns: `torch.nn.Module`: The quantized model """ load_in_4bit = bnb_quantization_config.load_in_4bit load_in_8bit = bnb_quantization_config.load_in_8bit if load_in_8bit and not is_8bit_bnb_available(): raise ImportError( "You have a version of `bitsandbytes` that is not compatible with 8bit quantization," " make sure you have the latest version of `bitsandbytes` installed." ) if load_in_4bit and not is_4bit_bnb_available(): raise ValueError( "You have a version of `bitsandbytes` that is not compatible with 4bit quantization," "make sure you have the latest version of `bitsandbytes` installed." ) modules_on_cpu = [] # custom device map if isinstance(device_map, dict) and len(device_map.keys()) > 1: modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: bnb_quantization_config.skip_modules = get_keys_to_not_convert(model) # add cpu modules to skip modules only for 4-bit modules if load_in_4bit: bnb_quantization_config.skip_modules.extend(modules_on_cpu) modules_to_not_convert = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fp32_modules is None: bnb_quantization_config.keep_in_fp32_modules = [] keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules modules_to_not_convert.extend(keep_in_fp32_modules) # compatibility with peft model.is_loaded_in_4bit = load_in_4bit model.is_loaded_in_8bit = load_in_8bit model_device = get_parameter_device(model) if model_device.type != "meta": # quantization of an already loaded model logger.warning( "It is not recommended to quantize a loaded model. " "The model should be instantiated under the `init_empty_weights` context manager." ) model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert) # convert param to the right dtype dtype = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules): param.to(torch.float32) if param.dtype != torch.float32: name = name.replace(".weight", "").replace(".bias", "") param = getattr(model, name, None) if param is not None: param.to(torch.float32) elif torch.is_floating_point(param): param.to(dtype) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device()) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device()) else: raise RuntimeError("No GPU found. A GPU is needed for quantization.") logger.info( f"The model device type is {model_device.type}. However, cuda is needed for quantization." "We move the model to cuda." 
) return model elif weights_location is None: raise RuntimeError( f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} " ) else: with init_empty_weights(): model = replace_with_bnb_layers( model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert ) device_map = get_quantized_model_device_map( model, bnb_quantization_config, device_map, max_memory=max_memory, no_split_module_classes=no_split_module_classes, ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): offload_state_dict = True offload = any(x in list(device_map.values()) for x in ["cpu", "disk"]) load_checkpoint_in_model( model, weights_location, device_map, dtype=bnb_quantization_config.torch_dtype, offload_folder=offload_folder, offload_state_dict=offload_state_dict, keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules, offload_8bit_bnb=load_in_8bit and offload, ) return dispatch_model(model, device_map=device_map, offload_dir=offload_folder) def get_quantized_model_device_map( model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None ): if device_map is None: if torch.cuda.is_available(): device_map = {"": torch.cuda.current_device()} else: raise RuntimeError("No GPU found. A GPU is needed for quantization.") logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.") if isinstance(device_map, str): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or " "'sequential'." ) special_dtypes = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules) } ) special_dtypes.update( { name: torch.float32 for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules) } ) kwargs = {} kwargs["special_dtypes"] = special_dtypes kwargs["no_split_module_classes"] = no_split_module_classes kwargs["dtype"] = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": max_memory = get_balanced_memory( model, low_zero=(device_map == "balanced_low_0"), max_memory=max_memory, **kwargs, ) kwargs["max_memory"] = max_memory device_map = infer_auto_device_map(model, **kwargs) if isinstance(device_map, dict): # check if don't have any quantized module on the cpu modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules device_map_without_some_modules = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_4bit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( "Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit" ) del device_map_without_some_modules return device_map def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None): """ A helper function to replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bit` modules or by `bnb.nn.Linear4bit` modules from the `bitsandbytes`library. The function will be run recursively and replace `torch.nn.Linear` modules. Parameters: model (`torch.nn.Module`): Input model or `torch.nn.Module` as the function is run recursively. modules_to_not_convert (`List[str]`): Names of the modules to not quantize convert. In practice we keep the `lm_head` in full precision for numerical stability reasons. current_key_name (`List[str]`, *optional*): An array to track the current key of the recursion. This is used to check whether the current key (part of it) is not in the list of modules to not convert. """ if modules_to_not_convert is None: modules_to_not_convert = [] model, has_been_replaced = _replace_with_bnb_layers( model, bnb_quantization_config, modules_to_not_convert, current_key_name ) if not has_been_replaced: logger.warning( "You are loading your model in 8bit or 4bit but no linear modules were found in your model." " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers." " Please double check your model architecture, or submit an issue on github if you think this is" " a bug." ) return model def _replace_with_bnb_layers( model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None, ): """ Private method that wraps the recursion for module replacement. Returns the converted model and a boolean that indicates if the conversion has been successfull or not. """ # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily import bitsandbytes as bnb has_been_replaced = False for name, module in model.named_children(): if current_key_name is None: current_key_name = [] current_key_name.append(name) if isinstance(module, nn.Linear) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` current_key_name_str = ".".join(current_key_name) proceed = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." 
in current_key_name_str) ) or key == current_key_name_str: proceed = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_8bit: bnb_module = bnb.nn.Linear8bitLt( module.in_features, module.out_features, module.bias is not None, has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold, ) elif bnb_quantization_config.load_in_4bit: bnb_module = bnb.nn.Linear4bit( module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_4bit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, quant_type=bnb_quantization_config.bnb_4bit_quant_type, ) else: raise ValueError("load_in_8bit and load_in_4bit can't be both False") bnb_module.weight.data = module.weight.data if module.bias is not None: bnb_module.bias.data = module.bias.data bnb_module.requires_grad_(False) setattr(model, name, bnb_module) has_been_replaced = True if len(list(module.children())) > 0: _, _has_been_replaced = _replace_with_bnb_layers( module, bnb_quantization_config, modules_to_not_convert, current_key_name ) has_been_replaced = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1) return model, has_been_replaced def get_keys_to_not_convert(model): r""" An utility function to get the key of the module to keep in full precision if any For example for CausalLM modules we may want to keep the lm_head in full precision for numerical stability reasons. For other architectures, we want to keep the tied weights of the model. The function will return a list of the keys of the modules to not convert in int8. Parameters: model (`torch.nn.Module`): Input model """ # Create a copy of the model with init_empty_weights(): tied_model = deepcopy(model) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_params = find_tied_parameters(tied_model) # For compatibility with Accelerate < 0.18 if isinstance(tied_params, dict): tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys()) else: tied_keys = sum(tied_params, []) has_tied_params = len(tied_keys) > 0 # Check if it is a base model is_base_model = False if hasattr(model, "base_model_prefix"): is_base_model = not hasattr(model, model.base_model_prefix) # Ignore this for base models (BertModel, GPT2Model, etc.) 
if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head list_modules = list(model.named_children()) list_last_module = [list_modules[-1][0]] # add last module together with tied weights intersection = set(list_last_module) - set(tied_keys) list_untouched = list(set(tied_keys)) + list(intersection) # remove ".weight" from the keys names_to_remove = [".weight", ".bias"] filtered_module_names = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: name = name.replace(name_to_remove, "") filtered_module_names.append(name) return filtered_module_names def has_4bit_bnb_layers(model): """Check if we have `bnb.nn.Linear4bit` or `bnb.nn.Linear8bitLt` layers inside our model""" # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily import bitsandbytes as bnb for m in model.modules(): if isinstance(m, bnb.nn.Linear4bit): return True return False def get_parameter_device(parameter: nn.Module): return next(parameter.parameters()).device def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics): # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fp16_statistics is None: set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param) tensor_name = param_name module = model if "." in tensor_name: splits = tensor_name.split(".") for split in splits[:-1]: new_module = getattr(module, split) if new_module is None: raise ValueError(f"{module} has no attribute {split}.") module = new_module tensor_name = splits[-1] # offload weights module._parameters[tensor_name].requires_grad = False offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index) if hasattr(module._parameters[tensor_name], "SCB"): offload_weight( module._parameters[tensor_name].SCB, param_name.replace("weight", "SCB"), offload_folder, index=offload_index, ) else: offload_weight(param, param_name, offload_folder, index=offload_index) offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index) set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
accelerate/src/accelerate/utils/bnb.py/0
{ "file_path": "accelerate/src/accelerate/utils/bnb.py", "repo_id": "accelerate", "token_count": 8764 }
10
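A sketch of how `load_and_quantize_model` from the `bnb.py` module above is typically driven: the model skeleton is created under `init_empty_weights`, then the real weights are loaded, quantized and dispatched from a checkpoint folder. The model identifier and weights path are placeholders, and this assumes a CUDA GPU plus a recent `bitsandbytes` installation.

```python
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

# Placeholder identifiers: substitute a real model id and a local folder that
# contains its weights (for example one fetched with huggingface_hub.snapshot_download).
model_id = "some-org/some-causal-lm"
weights_location = "/path/to/downloaded/weights"

config = AutoConfig.from_pretrained(model_id)
with init_empty_weights():
    # Builds the architecture on the meta device, without allocating real weights.
    empty_model = AutoModelForCausalLM.from_config(config)

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)

quantized_model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location=weights_location,
    device_map="auto",
)
```

Starting from the meta device is the recommended path in the function's own docstring; quantizing an already-loaded model works but triggers the warning emitted above.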
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib.metadata import subprocess import sys def install_xla(upgrade: bool = False): """ Helper function to install appropriate xla wheels based on the `torch` version in Google Colaboratory. Args: upgrade (`bool`, *optional*, defaults to `False`): Whether to upgrade `torch` and install the latest `torch_xla` wheels. Example: ```python >>> from accelerate.utils import install_xla >>> install_xla(upgrade=True) ``` """ in_colab = False if "IPython" in sys.modules: in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython()) if in_colab: if upgrade: torch_install_cmd = ["pip", "install", "-U", "torch"] subprocess.run(torch_install_cmd, check=True) # get the current version of torch torch_version = importlib.metadata.version("torch") torch_version_trunc = torch_version[: torch_version.rindex(".")] xla_wheel = f"https://storage.googleapis.com/tpu-pytorch/wheels/colab/torch_xla-{torch_version_trunc}-cp37-cp37m-linux_x86_64.whl" xla_install_cmd = ["pip", "install", xla_wheel] subprocess.run(xla_install_cmd, check=True) else: raise RuntimeError("`install_xla` utility works only on google colab.")
accelerate/src/accelerate/utils/torch_xla.py/0
{ "file_path": "accelerate/src/accelerate/utils/torch_xla.py", "repo_id": "accelerate", "token_count": 691 }
11
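Because `install_xla` above raises a `RuntimeError` outside of Google Colab, a caller can guard the call as in this small sketch; nothing here beyond the guard is new.

```python
from accelerate.utils import install_xla

try:
    # Upgrades torch and installs the matching torch_xla Colab wheel.
    install_xla(upgrade=True)
except RuntimeError as err:
    # Raised when not running inside a Google Colab session.
    print(f"Skipping XLA install: {err}")
```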
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu, require_non_cpu, require_non_xpu @require_cpu class CPUOptimizerTester(unittest.TestCase): def test_accelerated_optimizer_pickling(self): model = torch.nn.Linear(10, 10) optimizer = torch.optim.SGD(model.parameters(), 0.1) accelerator = Accelerator() optimizer = accelerator.prepare(optimizer) try: pickle.loads(pickle.dumps(optimizer)) except Exception as e: self.fail(f"Accelerated optimizer pickling failed with {e}") AcceleratorState._reset_state() @require_non_cpu @require_non_xpu class OptimizerTester(unittest.TestCase): def test_accelerated_optimizer_step_was_skipped(self): model = torch.nn.Linear(5, 5) optimizer = torch.optim.SGD(model.parameters(), 0.1) accelerator = Accelerator(mixed_precision="fp16") model, optimizer = accelerator.prepare(model, optimizer) loss = model(torch.randn(2, 5, device=accelerator.device)).sum() accelerator.backward(loss) for p in model.parameters(): # Fake the gradients, as if there's no overflow p.grad.fill_(0.01) optimizer.step() assert optimizer.step_was_skipped is False loss = model(torch.randn(2, 5, device=accelerator.device)).sum() accelerator.backward(loss) for p in model.parameters(): p.grad.fill_(0.01) # Manually set the gradients to be NaN, as if there's an overflow p.grad[0] = torch.tensor(float("nan")) optimizer.step() assert optimizer.step_was_skipped is True loss = model(torch.randn(2, 5, device=accelerator.device)).sum() accelerator.backward(loss) for p in model.parameters(): p.grad.fill_(0.01) # Manually set the gradients to be NaN, as if there's an overflow p.grad[0] = torch.tensor(float("nan")) optimizer.step() assert optimizer.step_was_skipped is True loss = model(torch.randn(2, 5, device=accelerator.device)).sum() accelerator.backward(loss) for p in model.parameters(): # Fake the gradients, as if there's no overflow p.grad.fill_(0.01) optimizer.step() assert optimizer.step_was_skipped is False AcceleratorState._reset_state()
accelerate/tests/test_optimizer.py/0
{ "file_path": "accelerate/tests/test_optimizer.py", "repo_id": "accelerate", "token_count": 1209 }
12
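The test above exercises the fact that, under fp16 mixed precision, the accelerated optimizer records whether the gradient scaler skipped an update because of inf/NaN gradients. A hedged sketch of using that flag in a training loop, with a toy model and random data; it assumes a CUDA device since fp16 mixed precision is not available on CPU.

```python
import torch

from accelerate import Accelerator

accelerator = Accelerator(mixed_precision="fp16")  # assumes a CUDA device
model = torch.nn.Linear(5, 5)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = accelerator.prepare(model, optimizer)

skipped = 0
for _ in range(10):
    loss = model(torch.randn(2, 5, device=accelerator.device)).sum()
    accelerator.backward(loss)
    optimizer.step()
    optimizer.zero_grad()
    # True when the fp16 gradient scaler saw inf/NaN gradients and skipped the update.
    if optimizer.step_was_skipped:
        skipped += 1
print(f"skipped optimizer steps: {skipped}")
```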
.PHONY: style quality # make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!) export PYTHONPATH = src check_dirs := src tests scripts style: black --line-length 119 --target-version py310 $(check_dirs) setup.py isort $(check_dirs) setup.py quality: black --check --line-length 119 --target-version py310 $(check_dirs) setup.py isort --check-only $(check_dirs) setup.py flake8 --max-line-length 119 $(check_dirs) setup.py # Release stuff pre-release: python src/alignment/release.py pre-patch: python src/alignment/release.py --patch post-release: python src/alignment/release.py --post_release post-patch: python src/alignment/release.py --post_release --patch wheels: python setup.py bdist_wheel && python setup.py sdist wheels_clean: rm -rf build && rm -rf dist pypi_upload: python -m pip install twine twine upload dist/* -r pypi pypi_test_upload: python -m pip install twine twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
alignment-handbook/Makefile/0
{ "file_path": "alignment-handbook/Makefile", "repo_id": "alignment-handbook", "token_count": 363 }
13
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from pathlib import Path from typing import Dict import torch from transformers import AutoTokenizer, BitsAndBytesConfig, PreTrainedTokenizer from transformers.trainer_utils import get_last_checkpoint from accelerate import Accelerator from huggingface_hub import list_repo_files from huggingface_hub.utils._errors import RepositoryNotFoundError from huggingface_hub.utils._validators import HFValidationError from peft import LoraConfig, PeftConfig from .configs import DataArguments, DPOConfig, ModelArguments, SFTConfig from .data import DEFAULT_CHAT_TEMPLATE def get_current_device() -> int: """Get the current device. For GPU we return the local process index to enable multiple GPU training.""" return Accelerator().local_process_index if torch.cuda.is_available() else "cpu" def get_kbit_device_map() -> Dict[str, int] | None: """Useful for running inference with quantized models by setting `device_map=get_peft_device_map()`""" return {"": get_current_device()} if torch.cuda.is_available() else None def get_quantization_config(model_args: ModelArguments) -> BitsAndBytesConfig | None: if model_args.load_in_4bit: compute_dtype = torch.float16 if model_args.torch_dtype not in {"auto", None}: compute_dtype = getattr(torch, model_args.torch_dtype) quantization_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=compute_dtype, bnb_4bit_quant_type=model_args.bnb_4bit_quant_type, bnb_4bit_use_double_quant=model_args.use_bnb_nested_quant, bnb_4bit_quant_storage=model_args.bnb_4bit_quant_storage, ).to_dict() elif model_args.load_in_8bit: quantization_config = BitsAndBytesConfig( load_in_8bit=True, ).to_dict() else: quantization_config = None return quantization_config def get_tokenizer( model_args: ModelArguments, data_args: DataArguments, auto_set_chat_template: bool = True ) -> PreTrainedTokenizer: """Get the tokenizer for the model.""" tokenizer = AutoTokenizer.from_pretrained( ( model_args.model_name_or_path if model_args.tokenizer_name_or_path is None else model_args.tokenizer_name_or_path ), revision=model_args.model_revision, trust_remote_code=model_args.trust_remote_code, ) if tokenizer.pad_token_id is None: tokenizer.pad_token_id = tokenizer.eos_token_id if data_args.truncation_side is not None: tokenizer.truncation_side = data_args.truncation_side # Set reasonable default for models without max length if tokenizer.model_max_length > 100_000: tokenizer.model_max_length = 2048 if data_args.chat_template is not None: tokenizer.chat_template = data_args.chat_template elif auto_set_chat_template and tokenizer.get_chat_template() is None: tokenizer.chat_template = DEFAULT_CHAT_TEMPLATE return tokenizer def get_peft_config(model_args: ModelArguments) -> PeftConfig | None: if model_args.use_peft is False: return None peft_config = LoraConfig( r=model_args.lora_r, lora_alpha=model_args.lora_alpha, lora_dropout=model_args.lora_dropout, bias="none", task_type="CAUSAL_LM", 
target_modules=model_args.lora_target_modules, modules_to_save=model_args.lora_modules_to_save, ) return peft_config def is_adapter_model(model_name_or_path: str, revision: str = "main") -> bool: try: # Try first if model on a Hub repo repo_files = list_repo_files(model_name_or_path, revision=revision) except (HFValidationError, RepositoryNotFoundError): # If not, check local repo repo_files = os.listdir(model_name_or_path) return "adapter_model.safetensors" in repo_files or "adapter_model.bin" in repo_files def get_checkpoint(training_args: SFTConfig | DPOConfig) -> Path | None: last_checkpoint = None if os.path.isdir(training_args.output_dir): last_checkpoint = get_last_checkpoint(training_args.output_dir) return last_checkpoint
alignment-handbook/src/alignment/model_utils.py/0
{ "file_path": "alignment-handbook/src/alignment/model_utils.py", "repo_id": "alignment-handbook", "token_count": 1806 }
14
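A sketch of how the helpers in `model_utils.py` above are combined in the handbook's training scripts: build the argument dataclasses, then derive the tokenizer, quantization config and PEFT config from them. In the real scripts the arguments come from a YAML recipe via the project's argument parser; the field values here are illustrative, and the assumption is that the remaining dataclass fields have defaults.

```python
from alignment import (
    DataArguments,
    ModelArguments,
    get_peft_config,
    get_quantization_config,
    get_tokenizer,
)

# Illustrative values only; normally parsed from a recipe YAML / CLI.
model_args = ModelArguments(
    model_name_or_path="mistralai/Mistral-7B-v0.1",
    use_peft=True,
    lora_r=16,
    lora_alpha=32,
)
data_args = DataArguments(chat_template=None)

tokenizer = get_tokenizer(model_args, data_args)
quantization_config = get_quantization_config(model_args)  # None unless 4-/8-bit loading is requested
peft_config = get_peft_config(model_args)  # a LoraConfig, or None when use_peft is False
```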
# Changelog This documents the main changes to the `candle` crate. ## v0.3.1 - Unreleased ### Added ### Modified ## v0.3.0 - 2023-10-01 ### Added - Added the Mistral 7b v0.1 model [983](https://github.com/huggingface/candle/pull/983). - Quantized version of the Mistral model [1009](https://github.com/huggingface/candle/pull/1009). - Add the gelu-erf op and activation function [969](https://github.com/huggingface/candle/pull/969). - Add the mixformer/phi-v1.5 model [930](https://github.com/huggingface/candle/pull/930). - Add the sclice-scatter op [927](https://github.com/huggingface/candle/pull/927). - Add the Wuerstchen diffusion model [911](https://github.com/huggingface/candle/pull/911). ### Modified - Support for simd128 intrinsics in some quantized vecdots [982](https://github.com/huggingface/candle/pull/982). - Optimize the index-select cuda kernel [976](https://github.com/huggingface/candle/pull/976). - Self-contained safetensor wrappers [946](https://github.com/huggingface/candle/pull/946). ## v0.2.2 - 2023-09-18 ### Added - Support for `top_p` sampling [819](https://github.com/huggingface/candle/pull/819). - T5 model including decoding [864](https://github.com/huggingface/candle/pull/864). - 1-d upsampling [839](https://github.com/huggingface/candle/pull/839). ### Modified - Bugfix for conv2d [820](https://github.com/huggingface/candle/pull/820). - Support tensor based indexing using `.i` [842](https://github.com/huggingface/candle/pull/842). ## v0.2.1 - 2023-09-11 ### Added - Add some RNNs (GRU and LSTM) in `candle-nn` [674](https://github.com/huggingface/candle/pull/674), [688](https://github.com/huggingface/candle/pull/688). - gguf v2 support [725](https://github.com/huggingface/candle/pull/725). - Quantized llama example in Python using the pyo3 api [716](https://github.com/huggingface/candle/pull/716). - `candle-nn` layer for conv2d-transposed [760](https://github.com/huggingface/candle/pull/760). - Add the Segment-Anything Model (SAM) as an example [773](https://github.com/huggingface/candle/pull/773). - TinyViT backbone for the segment anything example [787](https://github.com/huggingface/candle/pull/787). - Shape with holes support [770](https://github.com/huggingface/candle/pull/770). ### Modified - Dilations are now supported in conv-transpose2d. [671](https://github.com/huggingface/candle/pull/671). - Interactive mode for the quantized model [690](https://github.com/huggingface/candle/pull/690). - Faster softmax operation [747](https://github.com/huggingface/candle/pull/747). - Faster convolution operations on CPU and CUDA via im2col [802](https://github.com/huggingface/candle/pull/802). - Moving some models to a more central location [796](https://github.com/huggingface/candle/pull/796). ## v0.2.0 - 2023-08-30 ### Added - Add the powf op [664](https://github.com/huggingface/candle/pull/664). - Stable Diffusion XL support [647](https://github.com/huggingface/candle/pull/647). - Add the conv-transpose2d op [635](https://github.com/huggingface/candle/pull/635). - Refactor the VarBuilder api [627](https://github.com/huggingface/candle/pull/627). - Add some quantization command [625](https://github.com/huggingface/candle/pull/625). - Support more quantized types, e.g. Q2K, Q4K, Q5K... [586](https://github.com/huggingface/candle/pull/586). - Add pose estimation to the yolo example [589](https://github.com/huggingface/candle/pull/589). - Api to write GGUF files [585](https://github.com/huggingface/candle/pull/585). 
- Support more quantization types [580](https://github.com/huggingface/candle/pull/580). - Add EfficientNet as an example Computer Vision model [572](https://github.com/huggingface/candle/pull/572). - Add a group parameter to convolutions [566](https://github.com/huggingface/candle/pull/566). - New dtype: int64 [563](https://github.com/huggingface/candle/pull/563). - Handling of the GGUF file format. [559](https://github.com/huggingface/candle/pull/559). ## v0.1.2 - 2023-08-21
candle/CHANGELOG.md/0
{ "file_path": "candle/CHANGELOG.md", "repo_id": "candle", "token_count": 1525 }
15
# Chapter 1
candle/candle-book/src/chapter_1.md/0
{ "file_path": "candle/candle-book/src/chapter_1.md", "repo_id": "candle", "token_count": 4 }
16
# MNIST

Now that we have downloaded the MNIST parquet files, let's put them in a simple struct.

```rust,ignore
{{#include ../lib.rs:book_training_3}}
```

Parsing the file and packing it into single tensors requires the whole dataset to fit in memory. This is quite rudimentary, but it is simple enough for a small dataset like MNIST.
candle/candle-book/src/training/mnist.md/0
{ "file_path": "candle/candle-book/src/training/mnist.md", "repo_id": "candle", "token_count": 93 }
17
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Result; use candle_core::{Device, Tensor}; fn main() -> Result<()> { let a = Tensor::new(&[[0.0f32, 1.0, 2.0], [3.0, 4.0, 5.0]], &Device::Cpu)?; let b = Tensor::new(&[[88.0f32, 99.0]], &Device::Cpu)?; let new_a = a.slice_scatter(&b, 1, 2)?; assert_eq!(a.to_vec2::<f32>()?, [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]); assert_eq!(new_a.to_vec2::<f32>()?, [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]); Ok(()) }
candle/candle-core/examples/basics.rs/0
{ "file_path": "candle/candle-core/examples/basics.rs", "repo_id": "candle", "token_count": 287 }
18
/// Helper functions to write CPU kernels. use crate::backend::BackendStorage; use crate::{Error, Layout, Result, WithDType}; type C = super::CpuStorage; pub trait Map1 { fn f<T: WithDType>(&self, vs: &[T], layout: &Layout) -> Result<Vec<T>>; fn map(&self, vs: &C, layout: &Layout) -> Result<C> { match vs { C::U8(vs) => Ok(C::U8(self.f(vs, layout)?)), C::U32(vs) => Ok(C::U32(self.f(vs, layout)?)), C::I64(vs) => Ok(C::I64(self.f(vs, layout)?)), C::BF16(vs) => Ok(C::BF16(self.f(vs, layout)?)), C::F16(vs) => Ok(C::F16(self.f(vs, layout)?)), C::F32(vs) => Ok(C::F32(self.f(vs, layout)?)), C::F64(vs) => Ok(C::F64(self.f(vs, layout)?)), } } } pub trait Map1Any { fn f<T: WithDType, W: Fn(Vec<T>) -> C>(&self, vs: &[T], layout: &Layout, wrap: W) -> Result<C>; fn map(&self, vs: &C, layout: &Layout) -> Result<C> { match vs { C::U8(vs) => Ok(self.f(vs, layout, C::U8)?), C::U32(vs) => Ok(self.f(vs, layout, C::U32)?), C::I64(vs) => Ok(self.f(vs, layout, C::I64)?), C::BF16(vs) => Ok(self.f(vs, layout, C::BF16)?), C::F16(vs) => Ok(self.f(vs, layout, C::F16)?), C::F32(vs) => Ok(self.f(vs, layout, C::F32)?), C::F64(vs) => Ok(self.f(vs, layout, C::F64)?), } } } pub trait Map2 { const OP: &'static str; fn f<T: WithDType>(&self, v1: &[T], l1: &Layout, v2: &[T], l2: &Layout) -> Result<Vec<T>>; fn map(&self, v1: &C, l1: &Layout, v2: &C, l2: &Layout) -> Result<C> { match (v1, v2) { (C::U8(v1), C::U8(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::U32(v1), C::U32(v2)) => Ok(C::U32(self.f(v1, l1, v2, l2)?)), (C::I64(v1), C::I64(v2)) => Ok(C::I64(self.f(v1, l1, v2, l2)?)), (C::BF16(v1), C::BF16(v2)) => Ok(C::BF16(self.f(v1, l1, v2, l2)?)), (C::F16(v1), C::F16(v2)) => Ok(C::F16(self.f(v1, l1, v2, l2)?)), (C::F32(v1), C::F32(v2)) => Ok(C::F32(self.f(v1, l1, v2, l2)?)), (C::F64(v1), C::F64(v2)) => Ok(C::F64(self.f(v1, l1, v2, l2)?)), _ => Err(Error::DTypeMismatchBinaryOp { lhs: v1.dtype(), rhs: v2.dtype(), op: Self::OP, } .bt()), } } } pub trait Map2U8 { const OP: &'static str; fn f<T: WithDType>(&self, v1: &[T], l1: &Layout, v2: &[T], l2: &Layout) -> Result<Vec<u8>>; fn map(&self, v1: &C, l1: &Layout, v2: &C, l2: &Layout) -> Result<C> { match (v1, v2) { (C::U8(v1), C::U8(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::U32(v1), C::U32(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::I64(v1), C::I64(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::BF16(v1), C::BF16(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::F16(v1), C::F16(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::F32(v1), C::F32(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::F64(v1), C::F64(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), _ => Err(Error::DTypeMismatchBinaryOp { lhs: v1.dtype(), rhs: v2.dtype(), op: Self::OP, } .bt()), } } } pub fn binary_map<T: Copy, U: Copy, F: FnMut(T, T) -> U>( lhs_l: &Layout, rhs_l: &Layout, lhs: &[T], rhs: &[T], mut f: F, ) -> Vec<U> { match (lhs_l.contiguous_offsets(), rhs_l.contiguous_offsets()) { (Some((o_l1, o_l2)), Some((o_r1, o_r2))) => lhs[o_l1..o_l2] .iter() .zip(rhs[o_r1..o_r2].iter()) .map(|(&l, &r)| f(l, r)) .collect(), (Some((o_l1, o_l2)), None) => { // TODO: Maybe we want to avoid going through the layout twice. 
match rhs_l.offsets_b() { Some(ob) => { let mut i_in_block = 0; let mut i_right_broadcast = 0; lhs[o_l1..o_l2] .iter() .map(|&l| { let r = unsafe { rhs.get_unchecked(i_in_block + ob.start) }; i_right_broadcast += 1; if i_right_broadcast >= ob.right_broadcast { i_in_block += 1; i_right_broadcast = 0; } if i_in_block >= ob.len { i_in_block = 0 } f(l, *r) }) .collect() } None => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), } } (None, Some((o_r1, o_r2))) => { // TODO: Maybe we want to avoid going through the layout twice. match lhs_l.offsets_b() { Some(ob) => { let mut i_in_block = 0; let mut i_right_broadcast = 0; rhs[o_r1..o_r2] .iter() .map(|&r| { let l = unsafe { lhs.get_unchecked(i_in_block + ob.start) }; i_right_broadcast += 1; if i_right_broadcast >= ob.right_broadcast { i_in_block += 1; i_right_broadcast = 0; } if i_in_block >= ob.len { i_in_block = 0 } f(*l, r) }) .collect() } None => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), } } _ => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), } } // Similar to binary_map but with vectorized variants. pub fn binary_map_vec<T: Copy, F: FnMut(T, T) -> T, FV: FnMut(&[T], &[T], &mut [T])>( lhs_l: &Layout, rhs_l: &Layout, lhs: &[T], rhs: &[T], mut f: F, mut f_vec: FV, ) -> Vec<T> { let el_count = lhs_l.shape().elem_count(); match (lhs_l.contiguous_offsets(), rhs_l.contiguous_offsets()) { (Some((o_l1, o_l2)), Some((o_r1, o_r2))) => { let mut ys: Vec<T> = Vec::with_capacity(el_count); let ys_to_set = ys.spare_capacity_mut(); let ys_to_set = unsafe { std::mem::transmute::<&mut [std::mem::MaybeUninit<T>], &mut [T]>(ys_to_set) }; f_vec(&lhs[o_l1..o_l2], &rhs[o_r1..o_r2], ys_to_set); // SAFETY: values are all set by f_vec. unsafe { ys.set_len(el_count) }; ys } (Some((o_l1, o_l2)), None) => match rhs_l.offsets_b() { Some(ob) if ob.right_broadcast == 1 => { let rhs = &rhs[ob.start..ob.start + ob.len]; let mut ys: Vec<T> = Vec::with_capacity(el_count); let ys_to_set = ys.spare_capacity_mut(); let ys_to_set = unsafe { std::mem::transmute::<&mut [std::mem::MaybeUninit<T>], &mut [T]>(ys_to_set) }; let mut dst_i = 0; for src_i in (o_l1..o_l2).step_by(ob.len) { f_vec( &lhs[src_i..src_i + ob.len], rhs, &mut ys_to_set[dst_i..dst_i + ob.len], ); dst_i += ob.len; } // SAFETY: values are all set by f_vec. unsafe { ys.set_len(el_count) }; ys } Some(ob) => { let rhs = &rhs[ob.start..ob.start + ob.len]; let mut ys = lhs[o_l1..o_l2].to_vec(); for idx_l in 0..ob.left_broadcast { let start = idx_l * ob.len * ob.right_broadcast; for (i, &r) in rhs.iter().enumerate() { let start = start + i * ob.right_broadcast; for v in ys[start..start + ob.right_broadcast].iter_mut() { *v = f(*v, r) } } } ys } None => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), }, (None, Some((o_r1, o_r2))) => match lhs_l.offsets_b() { Some(ob) if ob.right_broadcast == 1 => { let lhs = &lhs[ob.start..ob.start + ob.len]; let mut ys: Vec<T> = Vec::with_capacity(el_count); let ys_to_set = ys.spare_capacity_mut(); let ys_to_set = unsafe { std::mem::transmute::<&mut [std::mem::MaybeUninit<T>], &mut [T]>(ys_to_set) }; let mut dst_i = 0; for src_i in (o_r1..o_r2).step_by(ob.len) { f_vec( lhs, &rhs[src_i..src_i + ob.len], &mut ys_to_set[dst_i..dst_i + ob.len], ); dst_i += ob.len; } // SAFETY: values are all set by f_vec. 
unsafe { ys.set_len(el_count) }; ys } Some(ob) => { let lhs = &lhs[ob.start..ob.start + ob.len]; let mut ys = rhs[o_r1..o_r2].to_vec(); for idx_l in 0..ob.left_broadcast { let start = idx_l * ob.len * ob.right_broadcast; for (i, &l) in lhs.iter().enumerate() { let start = start + i * ob.right_broadcast; for v in ys[start..start + ob.right_broadcast].iter_mut() { *v = f(l, *v) } } } ys } None => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), }, _ => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), } } pub fn unary_map<T: Copy, U: Copy, F: FnMut(T) -> U>( vs: &[T], layout: &Layout, mut f: F, ) -> Vec<U> { match layout.strided_blocks() { crate::StridedBlocks::SingleBlock { start_offset, len } => vs [start_offset..start_offset + len] .iter() .map(|&v| f(v)) .collect(), crate::StridedBlocks::MultipleBlocks { block_start_index, block_len, } => { let mut result = Vec::with_capacity(layout.shape().elem_count()); // Specialize the case where block_len is one to avoid the second loop. if block_len == 1 { for index in block_start_index { let v = unsafe { vs.get_unchecked(index) }; result.push(f(*v)) } } else { for index in block_start_index { for offset in 0..block_len { let v = unsafe { vs.get_unchecked(index + offset) }; result.push(f(*v)) } } } result } } } pub fn unary_map_vec<T: Copy, U: Copy, F: FnMut(T) -> U, FV: FnMut(&[T], &mut [U])>( vs: &[T], layout: &Layout, mut f: F, mut f_vec: FV, ) -> Vec<U> { match layout.strided_blocks() { crate::StridedBlocks::SingleBlock { start_offset, len } => { let mut ys: Vec<U> = Vec::with_capacity(len); let ys_to_set = ys.spare_capacity_mut(); let ys_to_set = unsafe { std::mem::transmute::<&mut [std::mem::MaybeUninit<U>], &mut [U]>(ys_to_set) }; f_vec(&vs[start_offset..start_offset + len], ys_to_set); // SAFETY: values are all set by f_vec. unsafe { ys.set_len(len) }; ys } crate::StridedBlocks::MultipleBlocks { block_start_index, block_len, } => { let el_count = layout.shape().elem_count(); // Specialize the case where block_len is one to avoid the second loop. if block_len == 1 { let mut result = Vec::with_capacity(el_count); for index in block_start_index { let v = unsafe { vs.get_unchecked(index) }; result.push(f(*v)) } result } else { let mut ys: Vec<U> = Vec::with_capacity(el_count); let ys_to_set = ys.spare_capacity_mut(); let ys_to_set = unsafe { std::mem::transmute::<&mut [std::mem::MaybeUninit<U>], &mut [U]>(ys_to_set) }; let mut dst_index = 0; for src_index in block_start_index { let vs = &vs[src_index..src_index + block_len]; let ys = &mut ys_to_set[dst_index..dst_index + block_len]; f_vec(vs, ys); dst_index += block_len; } // SAFETY: values are all set by f_vec. unsafe { ys.set_len(el_count) }; ys } } } }
candle/candle-core/src/cpu_backend/utils.rs/0
{ "file_path": "candle/candle-core/src/cpu_backend/utils.rs", "repo_id": "candle", "token_count": 9033 }
19
use crate::{DType, Result}; use candle_metal_kernels::Kernels; use metal::{Buffer, CommandBuffer, CommandQueue, MTLResourceOptions, NSUInteger}; use std::collections::HashMap; use std::ffi::c_void; use std::path::Path; use std::sync::{Arc, Mutex, RwLock, RwLockWriteGuard}; use super::MetalError; /// Unique identifier for cuda devices. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct DeviceId(usize); impl DeviceId { pub(crate) fn new() -> Self { // https://users.rust-lang.org/t/idiomatic-rust-way-to-generate-unique-id/33805 use std::sync::atomic; static COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::new(1); Self(COUNTER.fetch_add(1, atomic::Ordering::Relaxed)) } } type BufferMap = HashMap<(NSUInteger, MTLResourceOptions), Vec<Arc<Buffer>>>; type AllocatedBuffers = Arc<RwLock<BufferMap>>; #[derive(Clone)] pub struct MetalDevice { /// Unique identifier, the registryID is not sufficient as it identifies the GPU rather than /// the device itself. pub(crate) id: DeviceId, /// Raw metal device: <https://developer.apple.com/documentation/metal/mtldevice?language=objc> pub(crate) device: metal::Device, /// Single command queue for the entire device. pub(crate) command_queue: CommandQueue, /// One command buffer at a time. /// The scheduler works by allowing multiple /// [ComputeCommandEncoder](https://developer.apple.com/documentation/metal/mtlcomputecommandencoder?language=objc) /// on a single command buffer. Using a single command buffer would be fastest on the GPU but /// prevents overlapping of CPU and GPU commands (because command buffer needs to be committed /// to start to work). /// Despite what the documentation says, command buffers are NOT ordered. They are ordered /// for their START time, but there's no guarantee that command buffer1 will finish before /// command buffer2 starts (or there are metal bugs there) pub(crate) command_buffer: Arc<RwLock<CommandBuffer>>, /// Keeps track of the current amount of compute command encoders on the current /// command buffer /// Arc, RwLock because of the interior mutability. pub(crate) command_buffer_index: Arc<RwLock<usize>>, /// The maximum amount of [compute command encoder](https://developer.apple.com/documentation/metal/mtlcomputecommandencoder?language=objc) per [command buffer](https://developer.apple.com/documentation/metal/mtlcommandbuffer?language=objc) pub(crate) compute_per_buffer: usize, /// Simple keeper struct to keep track of the already compiled kernels so we can reuse them. /// Heavily used by [`candle_metal_kernels`] pub(crate) kernels: Arc<Kernels>, /// Simple allocator struct. /// The buffers are stored in size buckets since ML tends to use similar shapes over and over. /// We store the buffers in [`Arc`] because it's much faster than Obj-c internal ref counting /// (could be linked to FFI communication overhead). /// /// Whenever a buffer has a strong_count==1, we can reuse it, it means it was dropped in the /// graph calculation, and only we the allocator kept a reference to it, therefore it's free /// to be reused. However, in order for this to work, we need to guarantee the order of /// operation, so that this buffer is not being used by another kernel at the same time. /// Arc is the CPU reference count, it doesn't mean anything on the GPU side of things. /// /// Whenever we actually allocate a new buffer, we make a full sweep to clean up unused buffers /// (strong_count = 1). pub(crate) buffers: AllocatedBuffers, /// Seed for random number generation. 
pub(crate) seed: Arc<Mutex<Buffer>>, } impl std::fmt::Debug for MetalDevice { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "MetalDevice({:?})", self.id) } } impl std::ops::Deref for MetalDevice { type Target = metal::DeviceRef; fn deref(&self) -> &Self::Target { &self.device } } impl MetalDevice { pub fn id(&self) -> DeviceId { self.id } pub fn metal_device(&self) -> &metal::Device { &self.device } pub fn command_queue(&self) -> &CommandQueue { &self.command_queue } pub fn command_buffer(&self) -> Result<CommandBuffer> { let mut command_buffer_lock = self.command_buffer.write().map_err(MetalError::from)?; let mut command_buffer = command_buffer_lock.to_owned(); let mut index = self .command_buffer_index .write() .map_err(MetalError::from)?; if *index > self.compute_per_buffer { command_buffer.commit(); command_buffer = self.command_queue.new_command_buffer().to_owned(); *command_buffer_lock = command_buffer.clone(); *index = 0; self.drop_unused_buffers()?; } *index += 1; Ok(command_buffer) } pub fn wait_until_completed(&self) -> Result<()> { let mut command_buffer = self.command_buffer.write().map_err(MetalError::from)?; match command_buffer.status() { metal::MTLCommandBufferStatus::Committed | metal::MTLCommandBufferStatus::Scheduled | metal::MTLCommandBufferStatus::Completed => { panic!("Already committed"); } _ => {} } command_buffer.commit(); command_buffer.wait_until_completed(); *command_buffer = self.command_queue.new_command_buffer().to_owned(); Ok(()) } pub fn kernels(&self) -> &Kernels { &self.kernels } pub fn device(&self) -> &metal::Device { &self.device } /// Creates a new buffer (not necessarily zeroed). /// The buffer is [MTLPrivate](https://developer.apple.com/documentation/metal/mtlstoragemode) /// This means the buffer data cannot be read on the CPU directly. /// /// [`name`] is only used to keep track of the resource origin in case of bugs pub fn new_buffer( &self, element_count: usize, dtype: DType, name: &str, ) -> Result<Arc<Buffer>> { let size = (element_count * dtype.size_in_bytes()) as NSUInteger; self.allocate_buffer(size, MTLResourceOptions::StorageModePrivate, name) } /// Creates a new buffer (not necessarily zeroed). /// The buffer is [MTLManaged](https://developer.apple.com/documentation/metal/mtlstoragemode) /// This means the buffer can be read on the CPU but will require manual /// synchronization when the CPU memory is modified /// Used as a bridge to gather data back from the GPU pub fn new_buffer_managed(&self, size: NSUInteger) -> Result<Arc<Buffer>> { self.allocate_buffer(size, MTLResourceOptions::StorageModeManaged, "managed") } /// Creates a new buffer from data. /// The buffer is [MTLManaged](https://developer.apple.com/documentation/metal/mtlstoragemode) /// /// Does not require synchronization, as [newBufferWithBytes](https://developer.apple.com/documentation/metal/mtldevice/1433429-newbufferwithbytes) /// allocates the buffer and copies over the existing data before returning the MTLBuffer. 
pub fn new_buffer_with_data<T>(&self, data: &[T]) -> Result<Arc<Buffer>> { let size = core::mem::size_of_val(data) as NSUInteger; let new_buffer = self.device.new_buffer_with_data( data.as_ptr() as *const c_void, size, MTLResourceOptions::StorageModeManaged, ); let mut buffers = self.buffers.write().map_err(MetalError::from)?; let subbuffers = buffers .entry((size, MTLResourceOptions::StorageModeManaged)) .or_insert(vec![]); let new_buffer = Arc::new(new_buffer); subbuffers.push(new_buffer.clone()); Ok(new_buffer) } pub fn allocate_zeros(&self, size_in_bytes: usize) -> Result<Arc<Buffer>> { let buffer = self.allocate_buffer( size_in_bytes as NSUInteger, MTLResourceOptions::StorageModePrivate, "allocate_zeros", )?; let command_buffer = self.command_buffer()?; command_buffer.set_label("zeros"); let blit = command_buffer.new_blit_command_encoder(); blit.fill_buffer( &buffer, metal::NSRange { location: 0, length: buffer.length(), }, 0, ); blit.end_encoding(); Ok(buffer) } fn find_available_buffer( &self, size: NSUInteger, option: MTLResourceOptions, buffers: &RwLockWriteGuard<BufferMap>, ) -> Option<Arc<Buffer>> { let mut best_buffer: Option<&Arc<Buffer>> = None; let mut best_buffer_size: NSUInteger = NSUInteger::MAX; for ((buffer_size, buffer_option), subbuffers) in buffers.iter() { if buffer_size >= &size && buffer_size < &best_buffer_size && buffer_option == &option { for sub in subbuffers { if Arc::strong_count(sub) == 1 { best_buffer = Some(sub); best_buffer_size = *buffer_size; } } } } best_buffer.cloned() } fn drop_unused_buffers(&self) -> Result<()> { let mut buffers = self.buffers.write().map_err(MetalError::from)?; for subbuffers in buffers.values_mut() { let newbuffers = subbuffers .iter() .filter(|s| Arc::strong_count(*s) > 1) .map(Arc::clone) .collect(); *subbuffers = newbuffers; } Ok(()) } /// The critical allocator algorithm fn allocate_buffer( &self, size: NSUInteger, option: MTLResourceOptions, _name: &str, ) -> Result<Arc<Buffer>> { let mut buffers = self.buffers.write().map_err(MetalError::from)?; if let Some(b) = self.find_available_buffer(size, option, &buffers) { // Cloning also ensures we increment the strong count return Ok(b.clone()); } let size = buf_size(size); let subbuffers = buffers.entry((size, option)).or_insert(vec![]); let new_buffer = self.device.new_buffer(size as NSUInteger, option); let new_buffer = Arc::new(new_buffer); subbuffers.push(new_buffer.clone()); Ok(new_buffer) } /// Create a metal GPU capture trace on [`path`]. pub fn capture<P: AsRef<Path>>(&self, path: P) -> Result<()> { let capture = metal::CaptureManager::shared(); let descriptor = metal::CaptureDescriptor::new(); descriptor.set_destination(metal::MTLCaptureDestination::GpuTraceDocument); descriptor.set_capture_device(self); // The [set_output_url] call requires an absolute path so we convert it if needed. if path.as_ref().is_absolute() { descriptor.set_output_url(path); } else { let path = std::env::current_dir()?.join(path); descriptor.set_output_url(path); } capture .start_capture(&descriptor) .map_err(MetalError::from)?; Ok(()) } } fn buf_size(size: NSUInteger) -> NSUInteger { size.saturating_sub(1).next_power_of_two() as NSUInteger }
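// Note on the allocator above: `buf_size` rounds every allocation request up to the
// next power of two, so e.g. requests for 3000 and 4000 bytes both land in the
// 4096-byte bucket. A buffer whose `Arc::strong_count` has dropped back to 1 is
// considered free and is handed out again by `find_available_buffer` instead of
// asking Metal for a new allocation; `drop_unused_buffers` periodically sweeps those
// free buffers away to keep memory usage bounded.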
candle/candle-core/src/metal_backend/device.rs/0
{ "file_path": "candle/candle-core/src/metal_backend/device.rs", "repo_id": "candle", "token_count": 4620 }
20
use super::k_quants::{BlockQ2K, BlockQ4K, BlockQ4_0, BlockQ6K, BlockQ8K, BlockQ8_0, QK8_0, QK_K}; use crate::Result; use byteorder::{ByteOrder, LittleEndian}; use half::f16; use core::arch::wasm32::*; #[inline(always)] pub(crate) fn vec_dot_q4_0_q8_0(n: usize, xs: &[BlockQ4_0], ys: &[BlockQ8_0]) -> Result<f32> { let qk = QK8_0; if n % QK8_0 != 0 { crate::bail!("vec_dot_q4_0_q8_0: {n} is not divisible by {qk}") } unsafe { let mut acc = f32x4_splat(0.0f32); for (x, y) in xs.iter().zip(ys.iter()) { let x1234 = v128_load(x.qs.as_ptr() as *const v128); let x12 = v128_and(x1234, u8x16_splat(0x0F)); let x12 = i8x16_sub(x12, i8x16_splat(8)); let x34 = u8x16_shr(x1234, 4); let x34 = i8x16_sub(x34, i8x16_splat(8)); let x1 = i16x8_extend_low_i8x16(x12); let y1 = i16x8_load_extend_i8x8(y.qs.as_ptr()); let sum_xy = i32x4_dot_i16x8(x1, y1); let x2 = i16x8_extend_high_i8x16(x12); let y2 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(8)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x2, y2)); let x3 = i16x8_extend_low_i8x16(x34); let y3 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(16)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x3, y3)); let x4 = i16x8_extend_high_i8x16(x34); let y4 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(24)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x4, y4)); let sum_xy = f32x4_convert_i32x4(sum_xy); // f32x4_relaxed_madd is nightly only. let d = f32x4_splat(f16::to_f32(x.d) * f16::to_f32(y.d)); let scaled = f32x4_mul(sum_xy, d); acc = f32x4_add(acc, scaled) } let res = f32x4_extract_lane::<0>(acc) + f32x4_extract_lane::<1>(acc) + f32x4_extract_lane::<2>(acc) + f32x4_extract_lane::<3>(acc); Ok(res) } } #[inline(always)] pub(crate) fn vec_dot_q8_0_q8_0(n: usize, xs: &[BlockQ8_0], ys: &[BlockQ8_0]) -> Result<f32> { let qk = QK8_0; if n % QK8_0 != 0 { crate::bail!("vec_dot_q8_0_q8_0: {n} is not divisible by {qk}") } unsafe { let mut acc = f32x4_splat(0.0f32); for (x, y) in xs.iter().zip(ys.iter()) { let x1 = i16x8_load_extend_i8x8(x.qs.as_ptr()); let y1 = i16x8_load_extend_i8x8(y.qs.as_ptr()); let sum_xy = i32x4_dot_i16x8(x1, y1); let x2 = i16x8_load_extend_i8x8(x.qs.as_ptr().add(8)); let y2 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(8)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x2, y2)); let x3 = i16x8_load_extend_i8x8(x.qs.as_ptr().add(16)); let y3 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(16)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x3, y3)); let x4 = i16x8_load_extend_i8x8(x.qs.as_ptr().add(24)); let y4 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(24)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x4, y4)); let sum_xy = f32x4_convert_i32x4(sum_xy); // f32x4_relaxed_madd is nightly only. 
let d = f32x4_splat(f16::to_f32(x.d) * f16::to_f32(y.d)); let scaled = f32x4_mul(sum_xy, d); acc = f32x4_add(acc, scaled) } let res = f32x4_extract_lane::<0>(acc) + f32x4_extract_lane::<1>(acc) + f32x4_extract_lane::<2>(acc) + f32x4_extract_lane::<3>(acc); Ok(res) } } #[inline(always)] pub(crate) fn vec_dot_q2k_q8k(n: usize, xs: &[BlockQ2K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q2k_q8k: {n} is not divisible by {QK_K}") } unsafe { let mut sumf = f32x4_splat(0f32); for (x, y) in xs.iter().zip(ys.iter()) { let mut q2: &[_] = &x.qs; let mut q8: &[_] = &y.qs; let sc = &x.scales; let mut summs = i32x4_splat(0); for i in (0..(QK_K / 16)).step_by(4) { let bsums = i32x4_load_extend_i16x4(y.bsums.as_ptr().add(i)); let scales = i32x4_shr( i32x4( sc[i] as i32, sc[i + 1] as i32, sc[i + 2] as i32, sc[i + 3] as i32, ), 4, ); summs = i32x4_add(summs, i32x4_mul(bsums, scales)) } let summs = f32x4_convert_i32x4(summs); let dall = y.d * x.d.to_f32(); let dmin = y.d * x.dmin.to_f32(); let mut isum = i32x4_splat(0); let mut is = 0; for _ in 0..(QK_K / 128) { let mut shift = 0; for _ in 0..4 { let d = (sc[is] & 0xF) as i32; is += 1; let mut isuml = i16x8_splat(0); for l in (0..16).step_by(8) { let q8 = i16x8_load_extend_i8x8(q8.as_ptr().add(l)); let q2 = i16x8_load_extend_u8x8(q2.as_ptr().add(l)); let q2 = v128_and(i16x8_shr(q2, shift), i16x8_splat(3)); isuml = i16x8_add(isuml, i16x8_mul(q2, q8)) } let dd = i32x4_splat(d); isum = i32x4_add(isum, i32x4_mul(i32x4_extend_low_i16x8(isuml), dd)); isum = i32x4_add(isum, i32x4_mul(i32x4_extend_high_i16x8(isuml), dd)); let d = (sc[is] & 0xF) as i32; is += 1; let mut isuml = i16x8_splat(0); for l in (16..32).step_by(8) { let q8 = i16x8_load_extend_i8x8(q8.as_ptr().add(l)); let q2 = i16x8_load_extend_u8x8(q2.as_ptr().add(l)); let q2 = v128_and(i16x8_shr(q2, shift), i16x8_splat(3)); isuml = i16x8_add(isuml, i16x8_mul(q2, q8)) } let dd = i32x4_splat(d); isum = i32x4_add(isum, i32x4_mul(i32x4_extend_low_i16x8(isuml), dd)); isum = i32x4_add(isum, i32x4_mul(i32x4_extend_high_i16x8(isuml), dd)); shift += 2; // adjust the indexing q8 = &q8[32..]; } // adjust the indexing q2 = &q2[32..]; } let isum = f32x4_convert_i32x4(isum); sumf = f32x4_add( sumf, f32x4_sub( f32x4_mul(isum, f32x4_splat(dall)), f32x4_mul(summs, f32x4_splat(dmin)), ), ); } let sumf = f32x4_extract_lane::<0>(sumf) + f32x4_extract_lane::<1>(sumf) + f32x4_extract_lane::<2>(sumf) + f32x4_extract_lane::<3>(sumf); Ok(sumf) } } #[inline(always)] pub(crate) fn vec_dot_q4k_q8k(n: usize, xs: &[BlockQ4K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q4k_q8k: {n} is not divisible by {QK_K}") } const KMASK1: u32 = 0x3f3f3f3f; const KMASK2: u32 = 0x0f0f0f0f; const KMASK3: u32 = 0x03030303; let mut utmp: [u32; 4] = [0; 4]; let mut scales: [u8; 8] = [0; 8]; let mut mins: [u8; 8] = [0; 8]; let mut aux8: [u8; QK_K] = [0; QK_K]; let mut sums = f32x4_splat(0f32); unsafe { for (y, x) in ys.iter().zip(xs.iter()) { let q4 = &x.qs; let q8 = &y.qs; for j in 0..QK_K / 64 { let q4_1 = v128_load(q4.as_ptr().add(32 * j) as *const v128); let q4_2 = v128_load(q4.as_ptr().add(32 * j + 16) as *const v128); v128_store( aux8.as_mut_ptr().add(64 * j) as *mut v128, v128_and(q4_1, u8x16_splat(0x0F)), ); v128_store( aux8.as_mut_ptr().add(64 * j + 16) as *mut v128, v128_and(q4_2, u8x16_splat(0x0F)), ); v128_store( aux8.as_mut_ptr().add(64 * j + 32) as *mut v128, u8x16_shr(q4_1, 4), ); v128_store( aux8.as_mut_ptr().add(64 * j + 48) as *mut v128, u8x16_shr(q4_2, 4), ); } 
LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]); utmp[3] = ((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4); let uaux = utmp[1] & KMASK1; utmp[1] = (utmp[2] & KMASK2) | (((utmp[0] >> 6) & KMASK3) << 4); utmp[2] = uaux; utmp[0] &= KMASK1; //extract scales and mins LittleEndian::write_u32_into(&utmp[0..2], &mut scales); LittleEndian::write_u32_into(&utmp[2..4], &mut mins); let mut sumi = i32x4_splat(0); for j in (0..QK_K / 16).step_by(4) { let bsums = i32x4_load_extend_i16x4(y.bsums.as_ptr().add(j)); let (m1, m2) = (mins[j / 2] as i32, mins[j / 2 + 1] as i32); let mins = i32x4(m1, m1, m2, m2); sumi = i32x4_add(sumi, i32x4_mul(bsums, mins)); } let mut aux32 = i32x4_splat(0i32); for (scale_i, scale) in scales.iter().enumerate() { let scale = i32x4_splat(*scale as i32); for j in 0..4 { let i = 32 * scale_i + 8 * j; let q8 = i16x8_load_extend_i8x8(q8.as_ptr().add(i)); let aux8 = i16x8_load_extend_u8x8(aux8.as_ptr().add(i)); let aux16 = i16x8_mul(q8, aux8); aux32 = i32x4_add(aux32, i32x4_mul(scale, i32x4_extend_low_i16x8(aux16))); aux32 = i32x4_add(aux32, i32x4_mul(scale, i32x4_extend_high_i16x8(aux16))); } } let aux32 = f32x4_convert_i32x4(aux32); let d = f32x4_splat(x.d.to_f32() * y.d); sums = f32x4_add(sums, f32x4_mul(aux32, d)); let dmin = x.dmin.to_f32() * y.d; let dmin = f32x4_splat(dmin); let sumi = f32x4_convert_i32x4(sumi); sums = f32x4_sub(sums, f32x4_mul(sumi, dmin)); } let sums = f32x4_extract_lane::<0>(sums) + f32x4_extract_lane::<1>(sums) + f32x4_extract_lane::<2>(sums) + f32x4_extract_lane::<3>(sums); Ok(sums) } } #[inline(always)] pub(crate) fn vec_dot_q6k_q8k(n: usize, xs: &[BlockQ6K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q6k_q8k: {n} is not divisible by {QK_K}") } let mut aux8 = [0i8; QK_K]; unsafe { let mut sums = f32x4_splat(0f32); for (x, y) in xs.iter().zip(ys.iter()) { let q4 = &x.ql; let qh = &x.qh; let q8 = &y.qs; let mut aux32 = f32x4_splat(0f32); for j in (0..QK_K).step_by(128) { let aux8 = aux8.as_mut_ptr().add(j); let q4 = &q4.as_ptr().add(j / 2); let qh = &qh.as_ptr().add(j / 4); for l in (0..32).step_by(16) { // aux8[l] = (((q4[l] & 0xF) | ((qh[l] & 3) << 4)) as i32 - 32) as i8; let a8 = v128_or( v128_and(v128_load(q4.add(l) as *const v128), u8x16_splat(0xF)), u8x16_shl( v128_and(v128_load(qh.add(l) as *const v128), u8x16_splat(3)), 4, ), ); let a8_low = i16x8_sub(i16x8_extend_low_u8x16(a8), i16x8_splat(32)); let a8_high = i16x8_sub(i16x8_extend_high_u8x16(a8), i16x8_splat(32)); v128_store( aux8.add(l) as *mut v128, i8x16_narrow_i16x8(a8_low, a8_high), ); // aux8[l + 32] = // (((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) as i32 - 32) as i8; let a8 = v128_or( v128_and(v128_load(q4.add(l + 32) as *const v128), u8x16_splat(0xF)), u8x16_shl( v128_and( u8x16_shr(v128_load(qh.add(l) as *const v128), 2), u8x16_splat(3), ), 4, ), ); let a8_low = i16x8_sub(i16x8_extend_low_u8x16(a8), i16x8_splat(32)); let a8_high = i16x8_sub(i16x8_extend_high_u8x16(a8), i16x8_splat(32)); v128_store( aux8.add(l + 32) as *mut v128, i8x16_narrow_i16x8(a8_low, a8_high), ); // aux8[l + 64] = (((q4[l] >> 4) | (((qh[l] >> 4) & 3) << 4)) as i32 - 32) as i8; let a8 = v128_or( u8x16_shr(v128_load(q4.add(l) as *const v128), 4), u8x16_shl( v128_and( u8x16_shr(v128_load(qh.add(l) as *const v128), 4), u8x16_splat(3), ), 4, ), ); let a8_low = i16x8_sub(i16x8_extend_low_u8x16(a8), i16x8_splat(32)); let a8_high = i16x8_sub(i16x8_extend_high_u8x16(a8), i16x8_splat(32)); v128_store( aux8.add(l + 64) as *mut v128, i8x16_narrow_i16x8(a8_low, a8_high), 
); // aux8[l + 96] = // (((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) as i32 - 32) as i8; let a8 = v128_or( u8x16_shr(v128_load(q4.add(l + 32) as *const v128), 4), u8x16_shl( v128_and( u8x16_shr(v128_load(qh.add(l) as *const v128), 6), u8x16_splat(3), ), 4, ), ); let a8_low = i16x8_sub(i16x8_extend_low_u8x16(a8), i16x8_splat(32)); let a8_high = i16x8_sub(i16x8_extend_high_u8x16(a8), i16x8_splat(32)); v128_store( aux8.add(l + 96) as *mut v128, i8x16_narrow_i16x8(a8_low, a8_high), ); } } for (j, &scale) in x.scales.iter().enumerate() { let scale = f32x4_splat(scale as f32); for offset in [0, 8] { let aux16 = i16x8_mul( i16x8_load_extend_i8x8(q8.as_ptr().add(16 * j + offset)), i16x8_load_extend_i8x8(aux8.as_ptr().add(16 * j + offset)), ); aux32 = f32x4_add( aux32, f32x4_mul(f32x4_convert_i32x4(i32x4_extend_low_i16x8(aux16)), scale), ); aux32 = f32x4_add( aux32, f32x4_mul(f32x4_convert_i32x4(i32x4_extend_high_i16x8(aux16)), scale), ); } } let d = f32x4_splat(x.d.to_f32() * y.d); sums = f32x4_add(sums, f32x4_mul(aux32, d)); } let sums = f32x4_extract_lane::<0>(sums) + f32x4_extract_lane::<1>(sums) + f32x4_extract_lane::<2>(sums) + f32x4_extract_lane::<3>(sums); Ok(sums) } } #[inline(always)] pub(crate) fn vec_dot_q8k_q8k(n: usize, xs: &[BlockQ8K], ys: &[BlockQ8K]) -> Result<f32> { let qk = QK_K; if n % QK_K != 0 { crate::bail!("vec_dot_q8k_q8k: {n} is not divisible by {qk}") } unsafe { let mut acc = f32x4_splat(0.0f32); for (xs, ys) in xs.iter().zip(ys.iter()) { let x_qs = xs.qs.as_ptr(); let y_qs = ys.qs.as_ptr(); let mut sumi = i32x4_splat(0); for j in (0..QK_K).step_by(8) { let xs = i16x8_load_extend_i8x8(x_qs.add(j)); let ys = i16x8_load_extend_i8x8(y_qs.add(j)); let sum_xy = i32x4_dot_i16x8(xs, ys); sumi = i32x4_add(sumi, sum_xy) } let d = f32x4_splat(xs.d * ys.d); acc = f32x4_add(acc, f32x4_mul(f32x4_convert_i32x4(sumi), d)) } let res = f32x4_extract_lane::<0>(acc) + f32x4_extract_lane::<1>(acc) + f32x4_extract_lane::<2>(acc) + f32x4_extract_lane::<3>(acc); Ok(res) } }
candle/candle-core/src/quantized/simd128.rs/0
{ "file_path": "candle/candle-core/src/quantized/simd128.rs", "repo_id": "candle", "token_count": 11617 }
21
use anyhow::Result; use candle_core::{DType, Device::Cpu, Tensor}; #[test] fn display_scalar() -> Result<()> { let t = Tensor::new(1234u32, &Cpu)?; let s = format!("{t}"); assert_eq!(&s, "[1234]\nTensor[[], u32]"); let t = t.to_dtype(DType::F32)?.neg()?; let s = format!("{}", (&t / 10.0)?); assert_eq!(&s, "[-123.4000]\nTensor[[], f32]"); let s = format!("{}", (&t / 1e8)?); assert_eq!(&s, "[-1.2340e-5]\nTensor[[], f32]"); let s = format!("{}", (&t * 1e8)?); assert_eq!(&s, "[-1.2340e11]\nTensor[[], f32]"); let s = format!("{}", (&t * 0.)?); assert_eq!(&s, "[0.]\nTensor[[], f32]"); Ok(()) } #[test] fn display_vector() -> Result<()> { let t = Tensor::new::<&[u32; 0]>(&[], &Cpu)?; let s = format!("{t}"); assert_eq!(&s, "[]\nTensor[[0], u32]"); let t = Tensor::new(&[0.1234567, 1.0, -1.2, 4.1, f64::NAN], &Cpu)?; let s = format!("{t}"); assert_eq!( &s, "[ 0.1235, 1.0000, -1.2000, 4.1000, NaN]\nTensor[[5], f64]" ); let t = (Tensor::ones(50, DType::F32, &Cpu)? * 42.)?; let s = format!("\n{t}"); let expected = r#" [42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42.] Tensor[[50], f32]"#; assert_eq!(&s, expected); let t = (Tensor::ones(11000, DType::F32, &Cpu)? * 42.)?; let s = format!("{t}"); assert_eq!( &s, "[42., 42., 42., ..., 42., 42., 42.]\nTensor[[11000], f32]" ); Ok(()) } #[test] fn display_multi_dim() -> Result<()> { let t = (Tensor::ones((200, 100), DType::F32, &Cpu)? * 42.)?; let s = format!("\n{t}"); let expected = r#" [[42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], ... [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.]] Tensor[[200, 100], f32]"#; assert_eq!(&s, expected); let t = t.reshape(&[2, 1, 1, 100, 100])?; let t = format!("\n{t}"); let expected = r#" [[[[[42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], ... [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.]]]], [[[[42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], ... [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.]]]]] Tensor[[2, 1, 1, 100, 100], f32]"#; assert_eq!(&t, expected); Ok(()) }
candle/candle-core/tests/display_tests.rs/0
{ "file_path": "candle/candle-core/tests/display_tests.rs", "repo_id": "candle", "token_count": 1395 }
22
# candle-beit

[Beit](https://arxiv.org/abs/2106.08254) is a computer vision model. In this example, it is used as an ImageNet classifier: the model returns the probability of the image belonging to each of the 1000 ImageNet categories.

## Running an example

```bash
cargo run --example beit --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg

> mountain bike, all-terrain bike, off-roader: 56.16%
> bicycle-built-for-two, tandem bicycle, tandem: 3.08%
> maillot : 2.23%
> alp : 0.88%
> crash helmet : 0.85%
```

![Leading group, Giro d'Italia 2021](../yolo-v8/assets/bike.jpg)
candle/candle-examples/examples/beit/README.md/0
{ "file_path": "candle/candle-examples/examples/beit/README.md", "repo_id": "candle", "token_count": 261 }
23
pub const LAYERNORM_KERNELS: &str = include_str!(concat!(env!("OUT_DIR"), "/layernorm_kernels.ptx"));
candle/candle-examples/examples/custom-ops/cuda_kernels.rs/0
{ "file_path": "candle/candle-examples/examples/custom-ops/cuda_kernels.rs", "repo_id": "candle", "token_count": 44 }
24
# candle-encodec

[EnCodec](https://huggingface.co/facebook/encodec_24khz) is a high-quality audio compression model using an encoder/decoder architecture with residual vector quantization.

## Running an example

```bash
cargo run --example encodec --features encodec --release -- code-to-audio \
    candle-examples/examples/encodec/jfk-codes.safetensors \
    jfk.wav
```

This decodes the EnCodec tokens stored in `jfk-codes.safetensors` and generates an output wav file containing the audio data.

Instead of `code-to-audio`, one can use (full command lines are shown below):
- `audio-to-audio in.mp3 out.wav`: encodes the input audio file then decodes it to a wav file.
- `audio-to-code in.mp3 out.safetensors`: generates a safetensors file containing EnCodec tokens for the input audio file.

If the audio output file name is set to `-`, the audio is played directly on the default audio output device. If the audio input file is set to `-`, the audio is recorded from the default audio input device.
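For reference, the two alternative modes are invoked in the same way as `code-to-audio`; the file names `in.mp3`, `out.wav`, and `out.safetensors` below are placeholders.

```bash
# Round-trip an audio file through EnCodec: encode to tokens, then decode back to audio.
cargo run --example encodec --features encodec --release -- audio-to-audio in.mp3 out.wav

# Only encode, storing the EnCodec tokens in a safetensors file.
cargo run --example encodec --features encodec --release -- audio-to-code in.mp3 out.safetensors
```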
candle/candle-examples/examples/encodec/README.md/0
{ "file_path": "candle/candle-examples/examples/encodec/README.md", "repo_id": "candle", "token_count": 305 }
25
* GLM4 GLM-4-9B is the open-source version of the latest generation of pre-trained models in the GLM-4 series launched by Zhipu AI. - [[https://github.com/THUDM/GLM4][Github]] - [[https://huggingface.co/THUDM/glm-4-9b][huggingface]] ** Running with ~cuda~ #+begin_src shell cargo run --example glm4 --release --features cuda #+end_src ** Running with ~cpu~ #+begin_src shell cargo run --example glm4 --release -- --cpu #+end_src ** Output Example #+begin_src shell cargo run --example glm4 --release --features cuda -- --sample-len 500 --cache . Finished release [optimized] target(s) in 0.24s Running `/root/candle/target/release/examples/glm4 --sample-len 500 --cache .` avx: true, neon: false, simd128: false, f16c: true temp: 0.60 repeat-penalty: 1.20 repeat-last-n: 64 cache path . retrieved the files in 6.88963ms loaded the model in 6.113752297s starting the inference loop [欢迎使用GLM-4,请输入prompt] 请你告诉我什么是FFT 266 tokens generated (34.50 token/s) Result: 。Fast Fourier Transform (FFT) 是一种快速计算离散傅里叶变换(DFT)的方法,它广泛应用于信号处理、图像处理和数据分析等领域。 具体来说,FFT是一种将时域数据转换为频域数据的算法。在数字信号处理中,我们通常需要知道信号的频率成分,这就需要进行傅立叶变换。传统的傅立叶变换的计算复杂度较高,而 FFT 则大大提高了计算效率,使得大规模的 DFT 换成为可能。 以下是使用 Python 中的 numpy 进行 FFT 的简单示例: ```python import numpy as np # 创建一个时域信号 t = np.linspace(0, 1, num=100) f = np.sin(2*np.pi*5*t) + 3*np.cos(2*np.pi*10*t) # 对该信号做FFT变换,并计算其幅值谱 fft_result = np.fft.fftshift(np.abs(np.fft.fft(f))) ``` 在这个例子中,我们首先创建了一个时域信号 f。然后我们对这个信号进行了 FFT 换,得到了一个频域结果 fft_result。 #+end_src This example will read prompt from stdin * Citation #+begin_src @misc{glm2024chatglm, title={ChatGLM: A Family of Large Language Models from GLM-130B to GLM-4 All Tools}, author={Team GLM and Aohan Zeng and Bin Xu and Bowen Wang and Chenhui Zhang and Da Yin and Diego Rojas and Guanyu Feng and Hanlin Zhao and Hanyu Lai and Hao Yu and Hongning Wang and Jiadai Sun and Jiajie Zhang and Jiale Cheng and Jiayi Gui and Jie Tang and Jing Zhang and Juanzi Li and Lei Zhao and Lindong Wu and Lucen Zhong and Mingdao Liu and Minlie Huang and Peng Zhang and Qinkai Zheng and Rui Lu and Shuaiqi Duan and Shudan Zhang and Shulin Cao and Shuxun Yang and Weng Lam Tam and Wenyi Zhao and Xiao Liu and Xiao Xia and Xiaohan Zhang and Xiaotao Gu and Xin Lv and Xinghan Liu and Xinyi Liu and Xinyue Yang and Xixuan Song and Xunkai Zhang and Yifan An and Yifan Xu and Yilin Niu and Yuantao Yang and Yueyan Li and Yushi Bai and Yuxiao Dong and Zehan Qi and Zhaoyu Wang and Zhen Yang and Zhengxiao Du and Zhenyu Hou and Zihan Wang}, year={2024}, eprint={2406.12793}, archivePrefix={arXiv}, primaryClass={id='cs.CL' full_name='Computation and Language' is_active=True alt_name='cmp-lg' in_archive='cs' is_general=False description='Covers natural language processing. Roughly includes material in ACM Subject Class I.2.7. Note that work on artificial languages (programming languages, logics, formal systems) that does not explicitly address natural-language issues broadly construed (natural-language processing, computational linguistics, speech, text retrieval, etc.) is not appropriate for this area.'} } #+end_src #+begin_src @misc{wang2023cogvlm, title={CogVLM: Visual Expert for Pretrained Language Models}, author={Weihan Wang and Qingsong Lv and Wenmeng Yu and Wenyi Hong and Ji Qi and Yan Wang and Junhui Ji and Zhuoyi Yang and Lei Zhao and Xixuan Song and Jiazheng Xu and Bin Xu and Juanzi Li and Yuxiao Dong and Ming Ding and Jie Tang}, year={2023}, eprint={2311.03079}, archivePrefix={arXiv}, primaryClass={cs.CV} } #+end_src
candle/candle-examples/examples/glm4/README.org/0
{ "file_path": "candle/candle-examples/examples/glm4/README.org", "repo_id": "candle", "token_count": 1720 }
26
pub mod constants; pub mod conversation; pub mod image_processor; use candle_transformers::generation::{LogitsProcessor, Sampling}; use candle_transformers::models::llama::Cache; use anyhow::{bail, Error as E, Result}; use candle::{DType, Device, IndexOp, Tensor}; use candle_nn::VarBuilder; use candle_transformers::models::llava::config::{ HFGenerationConfig, HFLLaVAConfig, HFPreProcessorConfig, }; use candle_transformers::models::llava::{config::LLaVAConfig, LLaVA}; use clap::Parser; use constants::*; use conversation::Conversation; use hf_hub::api::sync::Api; use image_processor::{process_image, ImageProcessor}; use std::io::Write; use tokenizers::Tokenizer; #[derive(Parser, Debug)] #[command(author, version, about,long_about=None)] struct Args { #[arg(long, default_value = "llava-hf/llava-v1.6-vicuna-7b-hf")] model_path: String, #[arg(long, default_value = "tokenizer/tokenizer.json")] tokenizer_path: String, #[arg(long)] model_base: Option<String>, #[arg(long)] image_file: String, // Required #[arg(long)] conv_mode: Option<String>, #[arg(long, default_value_t = 0.2)] temperature: f32, #[arg(long, default_value_t = 512)] max_new_tokens: usize, #[arg(long, action)] hf: bool, #[arg(long, action)] cpu: bool, #[arg(long, action)] no_kv_cache: bool, #[arg(long)] prompt: String, /// The seed to use when generating random samples. Copy from candle llama. Not exist in python llava. #[arg(long, default_value_t = 299792458)] seed: u64, } //from https://github.com/huggingface/candle/blob/main/candle-examples/examples/clip/main.rs fn load_image<T: AsRef<std::path::Path>>( path: T, processor: &ImageProcessor, llava_config: &LLaVAConfig, dtype: DType, ) -> Result<((u32, u32), Tensor)> { let img = image::ImageReader::open(path)?.decode()?; let img_tensor = process_image(&img, processor, llava_config)?; Ok(((img.width(), img.height()), img_tensor.to_dtype(dtype)?)) } fn get_model_name_from_path(model_path: &str) -> String { let model_paths: Vec<String> = model_path .trim_matches('/') .split('/') .map(|s| s.to_string()) .collect(); if model_paths.last().unwrap().starts_with("checkpoint-") { format!( "{}_{}", model_paths[model_paths.len() - 2], model_paths.last().unwrap() ) } else { model_paths.last().unwrap().to_string() } } fn duplicate_vec<T>(vec: &[T], n: usize) -> Vec<T> where T: Clone, { let mut res = Vec::new(); for _ in 0..n { res.extend(vec.to_owned()); } res } fn insert_separator<T>(x: Vec<Vec<T>>, sep: Vec<T>) -> Vec<Vec<T>> where T: Clone, { let sep = vec![sep]; let sep = duplicate_vec(&sep, x.len()); let mut res = x .iter() .zip(sep.iter()) .flat_map(|(x, y)| vec![x.clone(), y.clone()]) .collect::<Vec<Vec<T>>>(); res.pop(); res } fn tokenizer_image_token( prompt: &str, tokenizer: &Tokenizer, image_token_index: i64, llava_config: &LLaVAConfig, ) -> Result<Tensor> { let prompt_chunks = prompt .split("<image>") .map(|s| { tokenizer .encode(s, true) .unwrap() .get_ids() .to_vec() .iter() .map(|x| *x as i64) .collect() }) .collect::<Vec<Vec<i64>>>(); let mut input_ids = Vec::new(); let mut offset = 0; if !prompt_chunks.is_empty() && !prompt_chunks[0].is_empty() && prompt_chunks[0][0] == llava_config.bos_token_id as i64 { offset = 1; input_ids.push(prompt_chunks[0][0]); } for x in insert_separator( prompt_chunks, duplicate_vec(&[image_token_index], offset + 1), ) .iter() { input_ids.extend(x[1..].to_vec()) } let input_len = input_ids.len(); Tensor::from_vec(input_ids, (1, input_len), &Device::Cpu).map_err(E::msg) } fn main() -> Result<()> { let mut args = Args::parse(); let device = 
candle_examples::device(args.cpu)?; println!("Start loading model"); let api = Api::new()?; let api = api.model(args.model_path.clone()); let (llava_config, tokenizer, clip_vision_config, image_processor) = if args.hf { let config_filename = api.get("config.json")?; let hf_llava_config: HFLLaVAConfig = serde_json::from_slice(&std::fs::read(config_filename)?)?; let generation_config_filename = api.get("generation_config.json")?; let generation_config: HFGenerationConfig = serde_json::from_slice(&std::fs::read(generation_config_filename)?)?; let preprocessor_config_filename = api.get("preprocessor_config.json")?; let preprocessor_config: HFPreProcessorConfig = serde_json::from_slice(&std::fs::read(preprocessor_config_filename)?)?; let llava_config = hf_llava_config.to_llava_config(&generation_config, &preprocessor_config); let tokenizer_filename = api.get("tokenizer.json")?; let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let clip_vision_config = hf_llava_config.to_clip_vision_config(); ( llava_config, tokenizer, Some(clip_vision_config), ImageProcessor::from_hf_preprocessor_config(&preprocessor_config), ) } else { let config_filename = api.get("config.json")?; let llava_config: LLaVAConfig = serde_json::from_slice(&std::fs::read(config_filename)?)?; let tokenizer = Tokenizer::from_file(&args.tokenizer_path) .map_err(|e| E::msg(format!("Error loading {}: {}", &args.tokenizer_path, e)))?; ( llava_config.clone(), tokenizer, None, ImageProcessor::from_pretrained(&llava_config.mm_vision_tower.unwrap())?, ) }; let llama_config = llava_config.to_llama_config(); let dtype: DType = match llava_config.torch_dtype.as_str() { "float16" => DType::F16, "bfloat16" => DType::BF16, _ => bail!("unsupported dtype"), }; let eos_token_id = llava_config.eos_token_id; println!("setting kv cache"); let mut cache = Cache::new(!args.no_kv_cache, dtype, &llama_config, &device)?; println!("loading model weights"); let weight_filenames = candle_examples::hub_load_safetensors(&api, "model.safetensors.index.json")?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&weight_filenames, dtype, &device)? 
}; let llava: LLaVA = LLaVA::load(vb, &llava_config, clip_vision_config)?; println!("generating conv template"); let image_token_se = format!( "{}{}{}", DEFAULT_IM_START_TOKEN, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_END_TOKEN ); let qs = if args.prompt.contains(IMAGE_PLACEHOLDER) { if llava_config.mm_use_im_start_end { args.prompt.replace(IMAGE_PLACEHOLDER, &image_token_se) } else { args.prompt.replace(IMAGE_PLACEHOLDER, DEFAULT_IMAGE_TOKEN) } } else if llava_config.mm_use_im_start_end { format!("{}\n{}", image_token_se, args.prompt) } else { format!("{}\n{}", DEFAULT_IMAGE_TOKEN, args.prompt) }; let model_name = get_model_name_from_path(&args.model_path).to_lowercase(); let conv_mode = if model_name.contains("llama-2") { "llava_llama_2" } else if model_name.contains("mistral") { "mistral_instruct" } else if model_name.contains("v1.6-34b") { "chatml_direct" } else if model_name.contains("v1") { "llava_v1" } else if model_name.contains("mpt") { "mpt" } else { "llava_v0" }; if args.conv_mode.is_some() && args.conv_mode.as_deref() != Some(conv_mode) { println!( "Warning: the model is trained with {}, but you are using {}", conv_mode, args.conv_mode.as_deref().unwrap() ); } else { args.conv_mode = Some(conv_mode.to_string()); } let mut conv = match args.conv_mode { Some(conv_mode) => match conv_mode.as_str() { "chatml_direct" => Conversation::conv_chatml_direct(), "llava_v1" => Conversation::conv_llava_v1(), _ => todo!("not implement yet"), }, None => bail!("conv_mode is required"), }; conv.append_user_message(Some(&qs)); conv.append_assistant_message(None); let prompt = conv.get_prompt(); println!("loading image"); let (image_size, image_tensor) = load_image(&args.image_file, &image_processor, &llava_config, dtype) .map_err(|e| E::msg(format!("Error loading {}: {}", &args.image_file, e)))?; let image_tensor = image_tensor.to_device(&device)?; let mut logits_processor = { let temperature = f64::from(args.temperature); let sampling = if temperature <= 0. { Sampling::ArgMax } else { Sampling::All { temperature } }; LogitsProcessor::from_sampling(args.seed, sampling) }; // get input tokens let tokens = tokenizer_image_token( &prompt, &tokenizer, llava_config.image_token_index as i64, &llava_config, )?; let mut input_embeds = llava.prepare_inputs_labels_for_multimodal(&tokens, &[image_tensor], &[image_size])?; //inference loop, based on https://github.com/huggingface/candle/blob/main/candle-examples/examples/llama/main.rs let mut tokenizer = candle_examples::token_output_stream::TokenOutputStream::new(tokenizer); let mut index_pos = 0; for index in 0..args.max_new_tokens { let (_, input_embeds_len, _) = input_embeds.dims3()?; let (context_size, context_index) = if cache.use_kv_cache && index > 0 { (1, index_pos) } else { (input_embeds_len, 0) }; let input = input_embeds.i((.., input_embeds_len.saturating_sub(context_size).., ..))?; let logits = llava.forward(&input, context_index, &mut cache)?; //[1,32000] let logits = logits.squeeze(0)?; let (_, input_len, _) = input.dims3()?; index_pos += input_len; let next_token = logits_processor.sample(&logits)?; let next_token_tensor = Tensor::from_vec(vec![next_token], 1, &device)?; let next_embeds = llava.llama.embed(&next_token_tensor)?.unsqueeze(0)?; input_embeds = Tensor::cat(&[input_embeds, next_embeds], 1)?; if next_token == eos_token_id as u32 { break; } if let Some(t) = tokenizer.next_token(next_token)? { print!("{t}"); std::io::stdout().flush()?; } } if let Some(rest) = tokenizer.decode_rest().map_err(E::msg)? { print!("{rest}"); } Ok(()) }
candle/candle-examples/examples/llava/main.rs/0
{ "file_path": "candle/candle-examples/examples/llava/main.rs", "repo_id": "candle", "token_count": 5097 }
27
// This should reach 91.5% accuracy. #[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use clap::{Parser, ValueEnum}; use rand::prelude::*; use candle::{DType, Result, Tensor, D}; use candle_nn::{loss, ops, Conv2d, Linear, Module, ModuleT, Optimizer, VarBuilder, VarMap}; const IMAGE_DIM: usize = 784; const LABELS: usize = 10; fn linear_z(in_dim: usize, out_dim: usize, vs: VarBuilder) -> Result<Linear> { let ws = vs.get_with_hints((out_dim, in_dim), "weight", candle_nn::init::ZERO)?; let bs = vs.get_with_hints(out_dim, "bias", candle_nn::init::ZERO)?; Ok(Linear::new(ws, Some(bs))) } trait Model: Sized { fn new(vs: VarBuilder) -> Result<Self>; fn forward(&self, xs: &Tensor) -> Result<Tensor>; } struct LinearModel { linear: Linear, } impl Model for LinearModel { fn new(vs: VarBuilder) -> Result<Self> { let linear = linear_z(IMAGE_DIM, LABELS, vs)?; Ok(Self { linear }) } fn forward(&self, xs: &Tensor) -> Result<Tensor> { self.linear.forward(xs) } } struct Mlp { ln1: Linear, ln2: Linear, } impl Model for Mlp { fn new(vs: VarBuilder) -> Result<Self> { let ln1 = candle_nn::linear(IMAGE_DIM, 100, vs.pp("ln1"))?; let ln2 = candle_nn::linear(100, LABELS, vs.pp("ln2"))?; Ok(Self { ln1, ln2 }) } fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.ln1.forward(xs)?; let xs = xs.relu()?; self.ln2.forward(&xs) } } #[derive(Debug)] struct ConvNet { conv1: Conv2d, conv2: Conv2d, fc1: Linear, fc2: Linear, dropout: candle_nn::Dropout, } impl ConvNet { fn new(vs: VarBuilder) -> Result<Self> { let conv1 = candle_nn::conv2d(1, 32, 5, Default::default(), vs.pp("c1"))?; let conv2 = candle_nn::conv2d(32, 64, 5, Default::default(), vs.pp("c2"))?; let fc1 = candle_nn::linear(1024, 1024, vs.pp("fc1"))?; let fc2 = candle_nn::linear(1024, LABELS, vs.pp("fc2"))?; let dropout = candle_nn::Dropout::new(0.5); Ok(Self { conv1, conv2, fc1, fc2, dropout, }) } fn forward(&self, xs: &Tensor, train: bool) -> Result<Tensor> { let (b_sz, _img_dim) = xs.dims2()?; let xs = xs .reshape((b_sz, 1, 28, 28))? .apply(&self.conv1)? .max_pool2d(2)? .apply(&self.conv2)? .max_pool2d(2)? .flatten_from(1)? .apply(&self.fc1)? .relu()?; self.dropout.forward_t(&xs, train)?.apply(&self.fc2) } } struct TrainingArgs { learning_rate: f64, load: Option<String>, save: Option<String>, epochs: usize, } fn training_loop_cnn( m: candle_datasets::vision::Dataset, args: &TrainingArgs, ) -> anyhow::Result<()> { const BSIZE: usize = 64; let dev = candle::Device::cuda_if_available(0)?; let train_labels = m.train_labels; let train_images = m.train_images.to_device(&dev)?; let train_labels = train_labels.to_dtype(DType::U32)?.to_device(&dev)?; let mut varmap = VarMap::new(); let vs = VarBuilder::from_varmap(&varmap, DType::F32, &dev); let model = ConvNet::new(vs.clone())?; if let Some(load) = &args.load { println!("loading weights from {load}"); varmap.load(load)? } let adamw_params = candle_nn::ParamsAdamW { lr: args.learning_rate, ..Default::default() }; let mut opt = candle_nn::AdamW::new(varmap.all_vars(), adamw_params)?; let test_images = m.test_images.to_device(&dev)?; let test_labels = m.test_labels.to_dtype(DType::U32)?.to_device(&dev)?; let n_batches = train_images.dim(0)? 
/ BSIZE; let mut batch_idxs = (0..n_batches).collect::<Vec<usize>>(); for epoch in 1..args.epochs { let mut sum_loss = 0f32; batch_idxs.shuffle(&mut thread_rng()); for batch_idx in batch_idxs.iter() { let train_images = train_images.narrow(0, batch_idx * BSIZE, BSIZE)?; let train_labels = train_labels.narrow(0, batch_idx * BSIZE, BSIZE)?; let logits = model.forward(&train_images, true)?; let log_sm = ops::log_softmax(&logits, D::Minus1)?; let loss = loss::nll(&log_sm, &train_labels)?; opt.backward_step(&loss)?; sum_loss += loss.to_vec0::<f32>()?; } let avg_loss = sum_loss / n_batches as f32; let test_logits = model.forward(&test_images, false)?; let sum_ok = test_logits .argmax(D::Minus1)? .eq(&test_labels)? .to_dtype(DType::F32)? .sum_all()? .to_scalar::<f32>()?; let test_accuracy = sum_ok / test_labels.dims1()? as f32; println!( "{epoch:4} train loss {:8.5} test acc: {:5.2}%", avg_loss, 100. * test_accuracy ); } if let Some(save) = &args.save { println!("saving trained weights in {save}"); varmap.save(save)? } Ok(()) } fn training_loop<M: Model>( m: candle_datasets::vision::Dataset, args: &TrainingArgs, ) -> anyhow::Result<()> { let dev = candle::Device::cuda_if_available(0)?; let train_labels = m.train_labels; let train_images = m.train_images.to_device(&dev)?; let train_labels = train_labels.to_dtype(DType::U32)?.to_device(&dev)?; let mut varmap = VarMap::new(); let vs = VarBuilder::from_varmap(&varmap, DType::F32, &dev); let model = M::new(vs.clone())?; if let Some(load) = &args.load { println!("loading weights from {load}"); varmap.load(load)? } let mut sgd = candle_nn::SGD::new(varmap.all_vars(), args.learning_rate)?; let test_images = m.test_images.to_device(&dev)?; let test_labels = m.test_labels.to_dtype(DType::U32)?.to_device(&dev)?; for epoch in 1..args.epochs { let logits = model.forward(&train_images)?; let log_sm = ops::log_softmax(&logits, D::Minus1)?; let loss = loss::nll(&log_sm, &train_labels)?; sgd.backward_step(&loss)?; let test_logits = model.forward(&test_images)?; let sum_ok = test_logits .argmax(D::Minus1)? .eq(&test_labels)? .to_dtype(DType::F32)? .sum_all()? .to_scalar::<f32>()?; let test_accuracy = sum_ok / test_labels.dims1()? as f32; println!( "{epoch:4} train loss: {:8.5} test acc: {:5.2}%", loss.to_scalar::<f32>()?, 100. * test_accuracy ); } if let Some(save) = &args.save { println!("saving trained weights in {save}"); varmap.save(save)? } Ok(()) } #[derive(ValueEnum, Clone)] enum WhichModel { Linear, Mlp, Cnn, } #[derive(Parser)] struct Args { #[clap(value_enum, default_value_t = WhichModel::Linear)] model: WhichModel, #[arg(long)] learning_rate: Option<f64>, #[arg(long, default_value_t = 200)] epochs: usize, /// The file where to save the trained weights, in safetensors format. #[arg(long)] save: Option<String>, /// The file where to load the trained weights from, in safetensors format. #[arg(long)] load: Option<String>, /// The directory where to load the dataset from, in ubyte format. #[arg(long)] local_mnist: Option<String>, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); // Load the dataset let m = if let Some(directory) = args.local_mnist { candle_datasets::vision::mnist::load_dir(directory)? } else { candle_datasets::vision::mnist::load()? 
}; println!("train-images: {:?}", m.train_images.shape()); println!("train-labels: {:?}", m.train_labels.shape()); println!("test-images: {:?}", m.test_images.shape()); println!("test-labels: {:?}", m.test_labels.shape()); let default_learning_rate = match args.model { WhichModel::Linear => 1., WhichModel::Mlp => 0.05, WhichModel::Cnn => 0.001, }; let training_args = TrainingArgs { epochs: args.epochs, learning_rate: args.learning_rate.unwrap_or(default_learning_rate), load: args.load, save: args.save, }; match args.model { WhichModel::Linear => training_loop::<LinearModel>(m, &training_args), WhichModel::Mlp => training_loop::<Mlp>(m, &training_args), WhichModel::Cnn => training_loop_cnn(m, &training_args), } }
candle/candle-examples/examples/mnist-training/main.rs/0
{ "file_path": "candle/candle-examples/examples/mnist-training/main.rs", "repo_id": "candle", "token_count": 4094 }
28
# candle-parler-tts

[Parler-TTS](https://huggingface.co/parler-tts/parler-tts-large-v1) is a large text-to-speech model with 2.2B parameters trained on ~45K hours of audio data. The voice can be controlled by a text prompt.

## Run an example

```bash
cargo run --example parler-tts -r -- \
  --prompt "Hey, how are you doing today?"
```

To control the generated voice, pass a description with the `--description` argument.

```bash
cargo run --example parler-tts -r -- \
  --prompt "Hey, how are you doing today?" \
  --description "A female speaker delivers a slightly expressive and animated speech with a moderate speed and pitch. The recording is of very high quality, with the speaker's voice sounding clear and very close up."
```

https://github.com/user-attachments/assets/1b16aeac-70a3-4803-8589-4563279bba33
candle/candle-examples/examples/parler-tts/README.md/0
{ "file_path": "candle/candle-examples/examples/parler-tts/README.md", "repo_id": "candle", "token_count": 260 }
29
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::Parser; use candle_transformers::models::quantized_recurrent_gemma::Model as QModel; use candle_transformers::models::recurrent_gemma::{Config, Model as BModel}; use candle::{DType, Device, Tensor}; use candle_examples::token_output_stream::TokenOutputStream; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; enum Model { B(BModel), Q(QModel), } impl Model { fn forward(&mut self, xs: &Tensor, pos: usize) -> candle::Result<Tensor> { match self { Self::B(m) => m.forward(xs, pos), Self::Q(m) => m.forward(xs, pos), } } } #[derive(Clone, Debug, Copy, PartialEq, Eq, clap::ValueEnum)] enum Which { #[value(name = "2b")] Base2B, #[value(name = "2b-it")] Instruct2B, } struct TextGeneration { model: Model, device: Device, tokenizer: TokenOutputStream, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, } impl TextGeneration { #[allow(clippy::too_many_arguments)] fn new( model: Model, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, top_k: usize, repeat_penalty: f32, repeat_last_n: usize, device: &Device, ) -> Self { let sampling = match temp { None => candle_transformers::generation::Sampling::ArgMax, Some(temperature) => match top_p { None => candle_transformers::generation::Sampling::TopK { temperature, k: top_k, }, Some(top_p) => candle_transformers::generation::Sampling::TopKThenTopP { temperature, k: top_k, p: top_p, }, }, }; let logits_processor = LogitsProcessor::from_sampling(seed, sampling); Self { model, tokenizer: TokenOutputStream::new(tokenizer), logits_processor, repeat_penalty, repeat_last_n, device: device.clone(), } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { use std::io::Write; self.tokenizer.clear(); let mut tokens = self .tokenizer .tokenizer() .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); for &t in tokens.iter() { if let Some(t) = self.tokenizer.next_token(t)? { print!("{t}") } } std::io::stdout().flush()?; let mut generated_tokens = 0usize; let eos_token = match self.tokenizer.get_token("<eos>") { Some(token) => token, None => anyhow::bail!("cannot find the <eos> token"), }; let start_gen = std::time::Instant::now(); for index in 0..sample_len { let context_size = if index > 0 { 1 } else { tokens.len() }; let start_pos = tokens.len().saturating_sub(context_size); let ctxt = &tokens[start_pos..]; let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?; let logits = self.model.forward(&input, start_pos)?; let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); generated_tokens += 1; if next_token == eos_token { break; } if let Some(t) = self.tokenizer.next_token(next_token)? { print!("{t}"); std::io::stdout().flush()?; } } let dt = start_gen.elapsed(); if let Some(rest) = self.tokenizer.decode_rest().map_err(E::msg)? 
{ print!("{rest}"); } std::io::stdout().flush()?; println!( "\n{generated_tokens} tokens generated ({:.2} token/s)", generated_tokens as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, #[arg(long)] prompt: String, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, #[arg(long, default_value_t = 250)] top_k: usize, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, short = 'n', default_value_t = 8000)] sample_len: usize, #[arg(long)] model_id: Option<String>, #[arg(long, default_value = "main")] revision: String, #[arg(long)] tokenizer_file: Option<String>, #[arg(long)] config_file: Option<String>, #[arg(long)] weight_files: Option<String>, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. #[arg(long, default_value_t = 64)] repeat_last_n: usize, /// The model to use. #[arg(long, default_value = "2b")] which: Which, #[arg(long)] quantized: bool, } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature.unwrap_or(0.), args.repeat_penalty, args.repeat_last_n ); let start = std::time::Instant::now(); let api = Api::new()?; let model_id = match &args.model_id { Some(model_id) => model_id.to_string(), None => match args.which { Which::Base2B => "google/recurrentgemma-2b".to_string(), Which::Instruct2B => "google/recurrentgemma-2b-it".to_string(), }, }; let repo = api.repo(Repo::with_revision( model_id, RepoType::Model, args.revision, )); let tokenizer_filename = match args.tokenizer_file { Some(file) => std::path::PathBuf::from(file), None => repo.get("tokenizer.json")?, }; let config_filename = match args.config_file { Some(file) => std::path::PathBuf::from(file), None => repo.get("config.json")?, }; let filenames = match args.weight_files { Some(files) => files .split(',') .map(std::path::PathBuf::from) .collect::<Vec<_>>(), None => { if args.quantized { let filename = match args.which { Which::Base2B => "recurrent-gemma-2b-q4k.gguf", Which::Instruct2B => "recurrent-gemma-7b-q4k.gguf", }; let filename = api.model("lmz/candle-gemma".to_string()).get(filename)?; vec![filename] } else { candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")? 
} } }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let config: Config = serde_json::from_reader(std::fs::File::open(config_filename)?)?; let start = std::time::Instant::now(); let device = candle_examples::device(args.cpu)?; let dtype = if device.is_cuda() { DType::BF16 } else { DType::F32 }; let model = if args.quantized { let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf( &filenames[0], &device, )?; Model::Q(QModel::new(&config, vb.pp("model"))?) } else { let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? }; Model::B(BModel::new(&config, vb.pp("model"))?) }; println!("loaded the model in {:?}", start.elapsed()); let mut pipeline = TextGeneration::new( model, tokenizer, args.seed, args.temperature, args.top_p, args.top_k, args.repeat_penalty, args.repeat_last_n, &device, ); pipeline.run(&args.prompt, args.sample_len)?; Ok(()) }
candle/candle-examples/examples/recurrent-gemma/main.rs/0
{ "file_path": "candle/candle-examples/examples/recurrent-gemma/main.rs", "repo_id": "candle", "token_count": 4698 }
30
## candle-rwkv

The [RWKV model](https://wiki.rwkv.com/) is a recurrent neural network model with performance on par with transformer architectures. Several variants are available; candle implements the v5 and v6 versions and can be used with Eagle 7B ([blog post](https://blog.rwkv.com/p/eagle-7b-soaring-past-transformers)).

```bash
$ cargo run --example rwkv --release -- --prompt "The smallest prime is "
avx: true, neon: false, simd128: false, f16c: true
temp: 0.00 repeat-penalty: 1.10 repeat-last-n: 64
The smallest prime is ϕ(2) = 2. The smallest composite is ϕ(3) = 3. The smallest perfect number is ϕ(5) = 5. The smallest perfect square is ϕ(4) = 4. The smallest perfect cube is ϕ(6) = 6.
```
candle/candle-examples/examples/rwkv/README.md/0
{ "file_path": "candle/candle-examples/examples/rwkv/README.md", "repo_id": "candle", "token_count": 235 }
31
# Get the checkpoint from # https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt import torch from safetensors.torch import save_file data = torch.load("tiny.en.pt") weights = {} for k, v in data["model_state_dict"].items(): weights[k] = v.contiguous() print(k, v.shape, v.dtype) save_file(weights, "tiny.en.safetensors") print(data["dims"])
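# Optional sanity check (a minimal sketch): reload the converted file with the
# `safetensors` library and verify that every tensor survived the round trip.
from safetensors.torch import load_file

converted = load_file("tiny.en.safetensors")
assert set(converted.keys()) == set(weights.keys())
for k, v in weights.items():
    assert torch.equal(converted[k], v), f"mismatch for {k}"
print(f"verified {len(converted)} tensors")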
candle/candle-examples/examples/whisper/extract_weights.py/0
{ "file_path": "candle/candle-examples/examples/whisper/extract_weights.py", "repo_id": "candle", "token_count": 183 }
32
#include <cmath> #include <cute/tensor.hpp> #include <cutlass/cutlass.h> #include <cutlass/array.h> #include "utils.h" namespace flash { using namespace cute; //////////////////////////////////////////////////////////////////////////////////////////////////// template <bool Is_causal> struct Alibi { const float alibi_slope; const int max_seqlen_k, max_seqlen_q; __forceinline__ __device__ Alibi(const float alibi_slope, const int max_seqlen_k, const int max_seqlen_q) : alibi_slope(alibi_slope) , max_seqlen_k(max_seqlen_k) , max_seqlen_q(max_seqlen_q) { }; template <typename Engine, typename Layout> __forceinline__ __device__ void apply_alibi(Tensor<Engine, Layout> &tensor, const int col_idx_offset_, const int row_idx_offset, const int warp_row_stride) { // tensor has shape (nrow=(2, MMA_M), ncol=(2, MMA_N)) static_assert(Layout::rank == 2, "Only support 2D Tensor"); const int lane_id = threadIdx.x % 32; const int col_idx_offset = col_idx_offset_ + (lane_id % 4) * 2; if constexpr (Is_causal) { // Simpler, we add the same bias vector to all rows #pragma unroll for (int nj = 0; nj < size<1, 1>(tensor); ++nj) { const int col_idx_base = col_idx_offset + nj * 8; #pragma unroll for (int j = 0; j < size<1, 0>(tensor); ++j) { const int col_idx = col_idx_base + j; #pragma unroll for (int mi = 0; mi < size<0>(tensor); ++mi) { tensor(mi, make_coord(j, nj)) += alibi_slope * col_idx; } } } } else { // Bias depends on both row_idx and col_idx #pragma unroll for (int mi = 0; mi < size<0, 1>(tensor); ++mi) { const int row_idx_base = row_idx_offset + mi * warp_row_stride; #pragma unroll for (int i = 0; i < size<0, 0>(tensor); ++i) { const int row_idx = row_idx_base + i * 8; #pragma unroll for (int nj = 0; nj < size<1, 1>(tensor); ++nj) { const int col_idx_base = col_idx_offset + nj * 8; #pragma unroll for (int j = 0; j < size<1, 0>(tensor); ++j) { const int col_idx = col_idx_base + j; tensor(make_coord(i, mi), make_coord(j, nj)) -= alibi_slope * abs(row_idx + max_seqlen_k - max_seqlen_q - col_idx); } } } } } } }; } // namespace flash
candle/candle-flash-attn/kernels/alibi.h/0
{ "file_path": "candle/candle-flash-attn/kernels/alibi.h", "repo_id": "candle", "token_count": 1556 }
33
// Inspired by // https://github.com/NVIDIA/DALI/blob/main/include/dali/core/static_switch.h // and https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Dispatch.h #pragma once /// @param COND - a boolean expression to switch by /// @param CONST_NAME - a name given for the constexpr bool variable. /// @param ... - code to execute for true and false /// /// Usage: /// ``` /// BOOL_SWITCH(flag, BoolConst, [&] { /// some_function<BoolConst>(...); /// }); /// ``` #define BOOL_SWITCH(COND, CONST_NAME, ...) \ [&] { \ if (COND) { \ constexpr static bool CONST_NAME = true; \ return __VA_ARGS__(); \ } else { \ constexpr static bool CONST_NAME = false; \ return __VA_ARGS__(); \ } \ }() #ifdef FLASHATTENTION_DISABLE_DROPOUT #define DROPOUT_SWITCH(COND, CONST_NAME, ...) \ [&] { \ constexpr static bool CONST_NAME = false; \ return __VA_ARGS__(); \ }() #else #define DROPOUT_SWITCH BOOL_SWITCH #endif #ifdef FLASHATTENTION_DISABLE_ALIBI #define ALIBI_SWITCH(COND, CONST_NAME, ...) \ [&] { \ constexpr static bool CONST_NAME = false; \ return __VA_ARGS__(); \ }() #else #define ALIBI_SWITCH BOOL_SWITCH #endif #ifdef FLASHATTENTION_DISABLE_UNEVEN_K #define EVENK_SWITCH(COND, CONST_NAME, ...) \ [&] { \ constexpr static bool CONST_NAME = true; \ return __VA_ARGS__(); \ }() #else #define EVENK_SWITCH BOOL_SWITCH #endif #ifdef FLASHATTENTION_DISABLE_SOFTCAP #define SOFTCAP_SWITCH(COND, CONST_NAME, ...) \ [&] { \ constexpr static bool CONST_NAME = false; \ return __VA_ARGS__(); \ }() #else #define SOFTCAP_SWITCH BOOL_SWITCH #endif #ifdef FLASHATTENTION_DISABLE_LOCAL #define LOCAL_SWITCH(COND, CONST_NAME, ...) \ [&] { \ constexpr static bool CONST_NAME = false; \ return __VA_ARGS__(); \ }() #else #define LOCAL_SWITCH BOOL_SWITCH #endif #define FP16_SWITCH(COND, ...) \ [&] { \ if (COND) { \ using elem_type = cutlass::half_t; \ return __VA_ARGS__(); \ } else { \ using elem_type = cutlass::bfloat16_t; \ return __VA_ARGS__(); \ } \ }() #define HEADDIM_SWITCH(HEADDIM, ...) \ [&] { \ if (HEADDIM <= 32) { \ constexpr static int kHeadDim = 32; \ return __VA_ARGS__(); \ } else if (HEADDIM <= 64) { \ constexpr static int kHeadDim = 64; \ return __VA_ARGS__(); \ } else if (HEADDIM <= 96) { \ constexpr static int kHeadDim = 96; \ return __VA_ARGS__(); \ } else if (HEADDIM <= 128) { \ constexpr static int kHeadDim = 128; \ return __VA_ARGS__(); \ } else if (HEADDIM <= 160) { \ constexpr static int kHeadDim = 160; \ return __VA_ARGS__(); \ } else if (HEADDIM <= 192) { \ constexpr static int kHeadDim = 192; \ return __VA_ARGS__(); \ } else if (HEADDIM <= 224) { \ constexpr static int kHeadDim = 224; \ return __VA_ARGS__(); \ } else if (HEADDIM <= 256) { \ constexpr static int kHeadDim = 256; \ return __VA_ARGS__(); \ } \ }()
candle/candle-flash-attn/kernels/static_switch.h/0
{ "file_path": "candle/candle-flash-attn/kernels/static_switch.h", "repo_id": "candle", "token_count": 2335 }
34
// WARNING: THIS IS ONLY VALID ASSUMING THAT inp IS CONTIGUOUS! // TODO: proper error reporting when ids are larger than v_size. #include "cuda_utils.cuh" #include<stdint.h> template<typename T, typename I> __device__ void index_select( const size_t numel, const size_t num_dims, const size_t *info, const I *ids, const T *inp, T *out, const size_t left_size, const size_t src_dim_size, const size_t ids_dim_size, const size_t right_size ) { const size_t *dims = info; const size_t *strides = info + num_dims; bool b = is_contiguous(num_dims, dims, strides); for (unsigned int dst_i = blockIdx.x * blockDim.x + threadIdx.x; dst_i < numel; dst_i += blockDim.x * gridDim.x) { unsigned int left_i = dst_i / (ids_dim_size * right_size); unsigned int id_i = dst_i / right_size % ids_dim_size; unsigned int right_i = dst_i % right_size; unsigned int src_i = left_i * (src_dim_size * right_size) + ids[id_i] * right_size + right_i; unsigned strided_i = b ? src_i : get_strided_index(src_i, num_dims, dims, strides); out[dst_i] = inp[strided_i]; } } #define IS_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t numel, \ const size_t num_dims, \ const size_t *info, \ const INDEX_TYPENAME *ids, \ const TYPENAME *inp, \ TYPENAME *out, \ const size_t left_size, \ const size_t src_dim_size, \ const size_t ids_dim_size, \ const size_t right_size \ ) { index_select(numel, num_dims, info, ids, inp, out, left_size, src_dim_size, ids_dim_size, right_size); } \ template<typename T, typename I> __device__ void gather( const size_t numel, const I *ids, const T *inp, T *out, const size_t left_size, const size_t src_dim_size, const size_t ids_dim_size, const size_t right_size ) { for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { size_t post = i % right_size; size_t idx = ids[i]; size_t pre = i / (right_size * ids_dim_size); size_t src_i = (pre * src_dim_size + idx) * right_size + post; out[i] = inp[src_i]; } } #define GATHER_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t numel, \ const INDEX_TYPENAME *ids, \ const TYPENAME *inp, \ TYPENAME *out, \ const size_t left_size, \ const size_t src_dim_size, \ const size_t ids_dim_size, \ const size_t right_size \ ) { gather(numel, ids, inp, out, left_size, src_dim_size, ids_dim_size, right_size); } \ template<typename T, typename I> __device__ void index_add( const I *ids, const size_t ids_dim_size, const T *inp, T *out, const size_t left_size, const size_t src_dim_size, const size_t dst_dim_size, const size_t right_size ) { const size_t numel = left_size * right_size; for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { const size_t pre = i / right_size; const size_t post = i % right_size; for (unsigned int j = 0; j < ids_dim_size; ++j) { const size_t idx = ids[j]; const size_t src_i = (pre * ids_dim_size + j) * right_size + post; const size_t dst_i = (pre * dst_dim_size + idx) * right_size + post; out[dst_i] += inp[src_i]; } } } #define IA_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const INDEX_TYPENAME *ids, \ const size_t ids_dim_size, \ const TYPENAME *inp, \ TYPENAME *out, \ const size_t left_size, \ const size_t src_dim_size, \ const size_t dst_dim_size, \ const size_t right_size \ ) { index_add(ids, ids_dim_size, inp, out, left_size, src_dim_size, dst_dim_size, right_size); } \ template<typename T, typename I> __device__ void scatter_add( const I *ids, const T 
*inp, T *out, const size_t left_size, const size_t src_dim_size, const size_t dst_dim_size, const size_t right_size ) { const size_t numel = left_size * right_size; for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { const size_t pre = i / right_size; const size_t post = i % right_size; for (unsigned int j = 0; j < src_dim_size; ++j) { const size_t src_i = (pre * src_dim_size + j) * right_size + post; const size_t idx = ids[src_i]; const size_t dst_i = (pre * dst_dim_size + idx) * right_size + post; out[dst_i] += inp[src_i]; } } } #define SA_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const INDEX_TYPENAME *ids, \ const TYPENAME *inp, \ TYPENAME *out, \ const size_t left_size, \ const size_t src_dim_size, \ const size_t dst_dim_size, \ const size_t right_size \ ) { scatter_add(ids, inp, out, left_size, src_dim_size, dst_dim_size, right_size); } \ #if __CUDA_ARCH__ >= 800 IS_OP(__nv_bfloat16, int64_t, is_i64_bf16) IS_OP(__nv_bfloat16, uint32_t, is_u32_bf16) IS_OP(__nv_bfloat16, uint8_t, is_u8_bf16) GATHER_OP(__nv_bfloat16, int64_t, gather_i64_bf16) GATHER_OP(__nv_bfloat16, uint32_t, gather_u32_bf16) GATHER_OP(__nv_bfloat16, uint8_t, gather_u8_bf16) IA_OP(__nv_bfloat16, int64_t, ia_i64_bf16) IA_OP(__nv_bfloat16, uint32_t, ia_u32_bf16) IA_OP(__nv_bfloat16, uint8_t, ia_u8_bf16) SA_OP(__nv_bfloat16, int64_t, sa_i64_bf16) SA_OP(__nv_bfloat16, uint32_t, sa_u32_bf16) SA_OP(__nv_bfloat16, uint8_t, sa_u8_bf16) #endif #if __CUDA_ARCH__ >= 530 IS_OP(__half, int64_t, is_i64_f16) IS_OP(__half, uint32_t, is_u32_f16) IS_OP(__half, uint8_t, is_u8_f16) GATHER_OP(__half, int64_t, gather_i64_f16) GATHER_OP(__half, uint32_t, gather_u32_f16) GATHER_OP(__half, uint8_t, gather_u8_f16) IA_OP(__half, int64_t, ia_i64_f16) IA_OP(__half, uint32_t, ia_u32_f16) IA_OP(__half, uint8_t, ia_u8_f16) SA_OP(__half, int64_t, sa_i64_f16) SA_OP(__half, uint32_t, sa_u32_f16) SA_OP(__half, uint8_t, sa_u8_f16) #endif IS_OP(float, int64_t, is_i64_f32) IS_OP(double, int64_t, is_i64_f64) IS_OP(uint8_t, int64_t, is_i64_u8) IS_OP(uint32_t, int64_t, is_i64_u32) IS_OP(int64_t, int64_t, is_i64_i64) IS_OP(float, uint32_t, is_u32_f32) IS_OP(double, uint32_t, is_u32_f64) IS_OP(uint8_t, uint32_t, is_u32_u8) IS_OP(int64_t, uint32_t, is_u32_i64) IS_OP(uint32_t, uint32_t, is_u32_u32) IS_OP(float, uint8_t, is_u8_f32) IS_OP(double, uint8_t, is_u8_f64) IS_OP(uint8_t, uint8_t, is_u8_u8) IS_OP(uint32_t, uint8_t, is_u8_u32) IS_OP(int64_t, uint8_t, is_u8_i64) GATHER_OP(float, int64_t, gather_i64_f32) GATHER_OP(double, int64_t, gather_i64_f64) GATHER_OP(uint8_t, int64_t, gather_i64_u8) GATHER_OP(uint32_t, int64_t, gather_i64_u32) GATHER_OP(int64_t, int64_t, gather_i64_i64) GATHER_OP(float, uint32_t, gather_u32_f32) GATHER_OP(double, uint32_t, gather_u32_f64) GATHER_OP(uint8_t, uint32_t, gather_u32_u8) GATHER_OP(int64_t, uint32_t, gather_u32_i64) GATHER_OP(uint32_t, uint32_t, gather_u32_u32) GATHER_OP(float, uint8_t, gather_u8_f32) GATHER_OP(double, uint8_t, gather_u8_f64) GATHER_OP(uint8_t, uint8_t, gather_u8_u8) GATHER_OP(uint32_t, uint8_t, gather_u8_u32) GATHER_OP(int64_t, uint8_t, gather_u8_i64) IA_OP(float, int64_t, ia_i64_f32) IA_OP(double, int64_t, ia_i64_f64) IA_OP(uint8_t, int64_t, ia_i64_u8) IA_OP(int64_t, int64_t, ia_i64_i64) IA_OP(uint32_t, int64_t, ia_i64_u32) IA_OP(float, uint32_t, ia_u32_f32) IA_OP(double, uint32_t, ia_u32_f64) IA_OP(uint8_t, uint32_t, ia_u32_u8) IA_OP(int64_t, uint32_t, ia_u32_i64) IA_OP(uint32_t, uint32_t, ia_u32_u32) IA_OP(float, uint8_t, 
ia_u8_f32) IA_OP(double, uint8_t, ia_u8_f64) IA_OP(uint8_t, uint8_t, ia_u8_u8) IA_OP(uint32_t, uint8_t, ia_u8_u32) IA_OP(int64_t, uint8_t, ia_u8_i64) SA_OP(float, int64_t, sa_i64_f32) SA_OP(double, int64_t, sa_i64_f64) SA_OP(uint8_t, int64_t, sa_i64_u8) SA_OP(int64_t, int64_t, sa_i64_i64) SA_OP(uint32_t, int64_t, sa_i64_u32) SA_OP(float, uint32_t, sa_u32_f32) SA_OP(double, uint32_t, sa_u32_f64) SA_OP(uint8_t, uint32_t, sa_u32_u8) SA_OP(int64_t, uint32_t, sa_u32_i64) SA_OP(uint32_t, uint32_t, sa_u32_u32) SA_OP(float, uint8_t, sa_u8_f32) SA_OP(double, uint8_t, sa_u8_f64) SA_OP(uint8_t, uint8_t, sa_u8_u8) SA_OP(uint32_t, uint8_t, sa_u8_u32) SA_OP(int64_t, uint8_t, sa_u8_i64)
candle/candle-kernels/src/indexing.cu/0
{ "file_path": "candle/candle-kernels/src/indexing.cu", "repo_id": "candle", "token_count": 4357 }
35
// Imported from https://github.com/ggerganov/llama.cpp/blob/master/ggml-metal.metal #include <metal_stdlib> using namespace metal; #define MAX(x, y) ((x) > (y) ? (x) : (y)) #define MIN(x, y) ((x) < (y) ? (x) : (y)) #define SWAP(x, y) { auto tmp = (x); (x) = (y); (y) = tmp; } #define QK4_0 32 #define QR4_0 2 typedef struct { half d; // delta uint8_t qs[QK4_0 / 2]; // nibbles / quants } block_q4_0; #define QK4_1 32 typedef struct { half d; // delta half m; // min uint8_t qs[QK4_1 / 2]; // nibbles / quants } block_q4_1; #define QK5_0 32 typedef struct { half d; // delta uint8_t qh[4]; // 5-th bit of quants uint8_t qs[QK5_0 / 2]; // nibbles / quants } block_q5_0; #define QK5_1 32 typedef struct { half d; // delta half m; // min uint8_t qh[4]; // 5-th bit of quants uint8_t qs[QK5_1 / 2]; // nibbles / quants } block_q5_1; #define QK8_0 32 typedef struct { half d; // delta int8_t qs[QK8_0]; // quants } block_q8_0; #define N_SIMDWIDTH 32 // assuming SIMD group size is 32 enum ggml_sort_order { GGML_SORT_ASC, GGML_SORT_DESC, }; // general-purpose kernel for addition, multiplication and division of two tensors // pros: works for non-contiguous tensors, supports broadcast across all dims // cons: not very efficient kernel void kernel_add( device const char * src0, device const char * src1, device char * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant uint64_t & nb13, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, constant int64_t & offs, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig.z; const int64_t i02 = tgpig.y; const int64_t i01 = tgpig.x; const int64_t i13 = i03 % ne13; const int64_t i12 = i02 % ne12; const int64_t i11 = i01 % ne11; device const char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01 + offs; device const char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11; device char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1 + offs; for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { const int i10 = i0 % ne10; *((device float *)(dst_ptr + i0*nb0)) = *((device float *)(src0_ptr + i0*nb00)) + *((device float *)(src1_ptr + i10*nb10)); } } kernel void kernel_mul( device const char * src0, device const char * src1, device char * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant uint64_t & nb13, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig.z; const int64_t 
i02 = tgpig.y; const int64_t i01 = tgpig.x; const int64_t i13 = i03 % ne13; const int64_t i12 = i02 % ne12; const int64_t i11 = i01 % ne11; device const char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01; device const char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11; device char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1; for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { const int i10 = i0 % ne10; *((device float *)(dst_ptr + i0*nb0)) = *((device float *)(src0_ptr + i0*nb00)) * *((device float *)(src1_ptr + i10*nb10)); } } kernel void kernel_div( device const char * src0, device const char * src1, device char * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant uint64_t & nb13, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig.z; const int64_t i02 = tgpig.y; const int64_t i01 = tgpig.x; const int64_t i13 = i03 % ne13; const int64_t i12 = i02 % ne12; const int64_t i11 = i01 % ne11; device const char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01; device const char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11; device char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1; for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { const int i10 = i0 % ne10; *((device float *)(dst_ptr + i0*nb0)) = *((device float *)(src0_ptr + i0*nb00)) / *((device float *)(src1_ptr + i10*nb10)); } } // assumption: src1 is a row // broadcast src1 into src0 kernel void kernel_add_row( device const float4 * src0, device const float4 * src1, device float4 * dst, constant uint64_t & nb [[buffer(28)]], uint tpig[[thread_position_in_grid]]) { dst[tpig] = src0[tpig] + src1[tpig % nb]; } kernel void kernel_mul_row( device const float4 * src0, device const float4 * src1, device float4 * dst, constant uint64_t & nb [[buffer(28)]], uint tpig[[thread_position_in_grid]]) { dst[tpig] = src0[tpig] * src1[tpig % nb]; } kernel void kernel_div_row( device const float4 * src0, device const float4 * src1, device float4 * dst, constant uint64_t & nb [[buffer(28)]], uint tpig[[thread_position_in_grid]]) { dst[tpig] = src0[tpig] / src1[tpig % nb]; } kernel void kernel_scale( device const float * src0, device float * dst, constant float & scale, uint tpig[[thread_position_in_grid]]) { dst[tpig] = src0[tpig] * scale; } kernel void kernel_scale_4( device const float4 * src0, device float4 * dst, constant float & scale, uint tpig[[thread_position_in_grid]]) { dst[tpig] = src0[tpig] * scale; } kernel void kernel_relu( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = max(0.0f, src0[tpig]); } kernel void kernel_tanh( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { device const float & x = src0[tpig]; dst[tpig] = precise::tanh(x); } constant float GELU_COEF_A = 0.044715f; constant float GELU_QUICK_COEF = -1.702f; constant float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f; kernel void kernel_gelu( 
device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { device const float4 & x = src0[tpig]; // BEWARE !!! // Simply using "tanh" instead of "precise::tanh" will sometimes results in NaNs! // This was observed with Falcon 7B and 40B models // dst[tpig] = 0.5f*x*(1.0f + precise::tanh(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x))); } kernel void kernel_gelu_quick( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { device const float4 & x = src0[tpig]; dst[tpig] = x*(1.0f/(1.0f+exp(GELU_QUICK_COEF*x))); } kernel void kernel_silu( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { device const float4 & x = src0[tpig]; dst[tpig] = x / (1.0f + exp(-x)); } kernel void kernel_sqr( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = src0[tpig] * src0[tpig]; } kernel void kernel_sum_rows( device const float * src0, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant uint64_t & nb13, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tpig[[thread_position_in_grid]]) { int64_t i3 = tpig.z; int64_t i2 = tpig.y; int64_t i1 = tpig.x; if (i3 >= ne03 || i2 >= ne02 || i1 >= ne01) { return; } device const float * src_row = (device const float *) ((device const char *) src0 + i1*nb01 + i2*nb02 + i3*nb03); device float * dst_row = (device float *) ((device char *) dst + i1*nb1 + i2*nb2 + i3*nb3); float row_sum = 0; for (int64_t i0 = 0; i0 < ne00; i0++) { row_sum += src_row[i0]; } dst_row[0] = row_sum; } kernel void kernel_soft_max( device const float * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant float & scale, threadgroup float * buf [[threadgroup(0)]], uint tgpig[[threadgroup_position_in_grid]], uint tpitg[[thread_position_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint ntg[[threads_per_threadgroup]]) { const int64_t i03 = (tgpig) / (ne02*ne01); const int64_t i02 = (tgpig - i03*ne02*ne01) / ne01; const int64_t i01 = (tgpig - i03*ne02*ne01 - i02*ne01); device const float * psrc0 = src0 + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; device const float * pmask = src1 != src0 ? src1 + i01*ne00 : nullptr; device float * pdst = dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; // parallel max float lmax = -INFINITY; for (int i00 = tpitg; i00 < ne00; i00 += ntg) { lmax = MAX(lmax, psrc0[i00]*scale + (pmask ? pmask[i00] : 0.0f)); } // find the max value in the block float max_val = simd_max(lmax); if (ntg > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = -INFINITY; } threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { buf[sgitg] = max_val; } threadgroup_barrier(mem_flags::mem_threadgroup); max_val = buf[tiisg]; max_val = simd_max(max_val); } // parallel sum float lsum = 0.0f; for (int i00 = tpitg; i00 < ne00; i00 += ntg) { const float exp_psrc0 = exp((psrc0[i00]*scale + (pmask ? 
pmask[i00] : 0.0f)) - max_val); lsum += exp_psrc0; pdst[i00] = exp_psrc0; } // This barrier fixes a failing test // ref: https://github.com/ggerganov/ggml/pull/621#discussion_r1425156335 threadgroup_barrier(mem_flags::mem_none); float sum = simd_sum(lsum); if (ntg > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = 0.0f; } threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { buf[sgitg] = sum; } threadgroup_barrier(mem_flags::mem_threadgroup); sum = buf[tiisg]; sum = simd_sum(sum); } const float inv_sum = 1.0f/sum; for (int i00 = tpitg; i00 < ne00; i00 += ntg) { pdst[i00] *= inv_sum; } } kernel void kernel_soft_max_4( device const float * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant float & scale, threadgroup float * buf [[threadgroup(0)]], uint tgpig[[threadgroup_position_in_grid]], uint tpitg[[thread_position_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint ntg[[threads_per_threadgroup]]) { const int64_t i03 = (tgpig) / (ne02*ne01); const int64_t i02 = (tgpig - i03*ne02*ne01) / ne01; const int64_t i01 = (tgpig - i03*ne02*ne01 - i02*ne01); device const float4 * psrc4 = (device const float4 *)(src0 + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00); device const float4 * pmask = src1 != src0 ? (device const float4 *)(src1 + i01*ne00) : nullptr; device float4 * pdst4 = (device float4 *)(dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00); // parallel max float4 lmax4 = -INFINITY; for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) { lmax4 = fmax(lmax4, psrc4[i00]*scale + (pmask ? pmask[i00] : 0.0f)); } const float lmax = MAX(MAX(lmax4[0], lmax4[1]), MAX(lmax4[2], lmax4[3])); float max_val = simd_max(lmax); if (ntg > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = -INFINITY; } threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { buf[sgitg] = max_val; } threadgroup_barrier(mem_flags::mem_threadgroup); max_val = buf[tiisg]; max_val = simd_max(max_val); } // parallel sum float4 lsum4 = 0.0f; for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) { const float4 exp_psrc4 = exp((psrc4[i00]*scale + (pmask ? 
pmask[i00] : 0.0f)) - max_val); lsum4 += exp_psrc4; pdst4[i00] = exp_psrc4; } const float lsum = lsum4[0] + lsum4[1] + lsum4[2] + lsum4[3]; // This barrier fixes a failing test // ref: https://github.com/ggerganov/ggml/pull/621#discussion_r1425156335 threadgroup_barrier(mem_flags::mem_none); float sum = simd_sum(lsum); if (ntg > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = 0.0f; } threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { buf[sgitg] = sum; } threadgroup_barrier(mem_flags::mem_threadgroup); sum = buf[tiisg]; sum = simd_sum(sum); } const float inv_sum = 1.0f/sum; for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) { pdst4[i00] *= inv_sum; } } kernel void kernel_diag_mask_inf( device const float * src0, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int & n_past, uint3 tpig[[thread_position_in_grid]]) { const int64_t i02 = tpig[2]; const int64_t i01 = tpig[1]; const int64_t i00 = tpig[0]; if (i00 > n_past + i01) { dst[i02*ne01*ne00 + i01*ne00 + i00] = -INFINITY; } else { dst[i02*ne01*ne00 + i01*ne00 + i00] = src0[i02*ne01*ne00 + i01*ne00 + i00]; } } kernel void kernel_diag_mask_inf_8( device const float4 * src0, device float4 * dst, constant int64_t & ne00, constant int64_t & ne01, constant int & n_past, uint3 tpig[[thread_position_in_grid]]) { const int64_t i = 2*tpig[0]; dst[i+0] = src0[i+0]; dst[i+1] = src0[i+1]; int64_t i4 = 4*i; const int64_t i02 = i4/(ne00*ne01); i4 -= i02*ne00*ne01; const int64_t i01 = i4/(ne00); i4 -= i01*ne00; const int64_t i00 = i4; for (int k = 3; k >= 0; --k) { if (i00 + 4 + k <= n_past + i01) { break; } dst[i+1][k] = -INFINITY; if (i00 + k > n_past + i01) { dst[i][k] = -INFINITY; } } } kernel void kernel_norm( device const void * src0, device float * dst, constant int64_t & ne00, constant uint64_t & nb01, constant float & eps, threadgroup float * sum [[threadgroup(0)]], uint tgpig[[threadgroup_position_in_grid]], uint tpitg[[thread_position_in_threadgroup]], uint ntg[[threads_per_threadgroup]]) { device const float * x = (device const float *) ((device const char *) src0 + tgpig*nb01); // MEAN // parallel sum sum[tpitg] = 0.0f; for (int i00 = tpitg; i00 < ne00; i00 += ntg) { sum[tpitg] += x[i00]; } // reduce threadgroup_barrier(mem_flags::mem_threadgroup); for (uint i = ntg/2; i > 0; i /= 2) { if (tpitg < i) { sum[tpitg] += sum[tpitg + i]; } threadgroup_barrier(mem_flags::mem_threadgroup); } const float mean = sum[0] / ne00; // recenter and VARIANCE threadgroup_barrier(mem_flags::mem_threadgroup); device float * y = dst + tgpig*ne00; sum[tpitg] = 0.0f; for (int i00 = tpitg; i00 < ne00; i00 += ntg) { y[i00] = x[i00] - mean; sum[tpitg] += y[i00] * y[i00]; } // reduce threadgroup_barrier(mem_flags::mem_threadgroup); for (uint i = ntg/2; i > 0; i /= 2) { if (tpitg < i) { sum[tpitg] += sum[tpitg + i]; } threadgroup_barrier(mem_flags::mem_threadgroup); } const float variance = sum[0] / ne00; const float scale = 1.0f/sqrt(variance + eps); for (int i00 = tpitg; i00 < ne00; i00 += ntg) { y[i00] = y[i00] * scale; } } kernel void kernel_rms_norm( device const void * src0, device float * dst, constant int64_t & ne00, constant uint64_t & nb01, constant float & eps, threadgroup float * buf [[threadgroup(0)]], uint tgpig[[threadgroup_position_in_grid]], uint tpitg[[thread_position_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint ntg[[threads_per_threadgroup]]) { device const float4 * x = (device const float4 *) ((device const char *) src0 + tgpig*nb01); float4 sumf 
= 0; float all_sum = 0; // parallel sum for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) { sumf += x[i00] * x[i00]; } all_sum = sumf[0] + sumf[1] + sumf[2] + sumf[3]; all_sum = simd_sum(all_sum); if (ntg > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = 0.0f; } threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { buf[sgitg] = all_sum; } threadgroup_barrier(mem_flags::mem_threadgroup); all_sum = buf[tiisg]; all_sum = simd_sum(all_sum); } const float mean = all_sum/ne00; const float scale = 1.0f/sqrt(mean + eps); device float4 * y = (device float4 *) (dst + tgpig*ne00); for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) { y[i00] = x[i00] * scale; } } kernel void kernel_group_norm( device const float * src0, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int32_t & n_groups, constant float & eps, threadgroup float * buf [[threadgroup(0)]], uint tgpig[[threadgroup_position_in_grid]], uint tpitg[[thread_position_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint ntg[[threads_per_threadgroup]]) { const int64_t ne = ne00*ne01*ne02; const int64_t gs = ne00*ne01*((ne02 + n_groups - 1) / n_groups); int start = tgpig * gs; int end = start + gs; start += tpitg; if (end >= ne) { end = ne; } float tmp = 0.0f; // partial sum for thread in warp for (int j = start; j < end; j += ntg) { tmp += src0[j]; } threadgroup_barrier(mem_flags::mem_threadgroup); tmp = simd_sum(tmp); if (ntg > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = 0.0f; } threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { buf[sgitg] = tmp; } threadgroup_barrier(mem_flags::mem_threadgroup); tmp = buf[tiisg]; tmp = simd_sum(tmp); } const float mean = tmp / gs; tmp = 0.0f; for (int j = start; j < end; j += ntg) { float xi = src0[j] - mean; dst[j] = xi; tmp += xi * xi; } tmp = simd_sum(tmp); if (ntg > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = 0.0f; } threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { buf[sgitg] = tmp; } threadgroup_barrier(mem_flags::mem_threadgroup); tmp = buf[tiisg]; tmp = simd_sum(tmp); } const float variance = tmp / gs; const float scale = 1.0f/sqrt(variance + eps); for (int j = start; j < end; j += ntg) { dst[j] *= scale; } } // function for calculate inner product between half a q4_0 block and 16 floats (yl), sumy is SUM(yl[i]) // il indicates where the q4 quants begin (0 or QK4_0/4) // we assume that the yl's have been multiplied with the appropriate scale factor // that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096) inline float block_q_n_dot_y(device const block_q4_0 * qb_curr, float sumy, thread float * yl, int il) { float d = qb_curr->d; float2 acc = 0.f; device const uint16_t * qs = ((device const uint16_t *)qb_curr + 1 + il/2); for (int i = 0; i < 8; i+=2) { acc[0] += yl[i + 0] * (qs[i / 2] & 0x000F) + yl[i + 1] * (qs[i / 2] & 0x0F00); acc[1] += yl[i + 8] * (qs[i / 2] & 0x00F0) + yl[i + 9] * (qs[i / 2] & 0xF000); } return d * (sumy * -8.f + acc[0] + acc[1]); } // function for calculate inner product between half a q4_1 block and 16 floats (yl), sumy is SUM(yl[i]) // il indicates where the q4 quants begin (0 or QK4_0/4) // we assume that the yl's have been multiplied with the appropriate scale factor // that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096) inline float block_q_n_dot_y(device const block_q4_1 * qb_curr, float sumy, thread float * yl, int il) 
{ float d = qb_curr->d; float m = qb_curr->m; float2 acc = 0.f; device const uint16_t * qs = ((device const uint16_t *)qb_curr + 2 + il/2); for (int i = 0; i < 8; i+=2) { acc[0] += yl[i + 0] * (qs[i / 2] & 0x000F) + yl[i + 1] * (qs[i / 2] & 0x0F00); acc[1] += yl[i + 8] * (qs[i / 2] & 0x00F0) + yl[i + 9] * (qs[i / 2] & 0xF000); } return d * (acc[0] + acc[1]) + sumy * m; } // function for calculate inner product between half a q5_0 block and 16 floats (yl), sumy is SUM(yl[i]) // il indicates where the q5 quants begin (0 or QK5_0/4) // we assume that the yl's have been multiplied with the appropriate scale factor // that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096) inline float block_q_n_dot_y(device const block_q5_0 * qb_curr, float sumy, thread float * yl, int il) { float d = qb_curr->d; float2 acc = 0.f; device const uint16_t * qs = ((device const uint16_t *)qb_curr + 3 + il/2); const uint32_t qh = *((device const uint32_t *)qb_curr->qh); for (int i = 0; i < 8; i+=2) { acc[0] += yl[i + 0] * ((qs[i / 2] & 0x000F) | ((qh >> (i+0+il ) << 4 ) & 0x00010)) + yl[i + 1] * ((qs[i / 2] & 0x0F00) | ((qh >> (i+1+il ) << 12) & 0x01000)); acc[1] += yl[i + 8] * ((qs[i / 2] & 0x00F0) | ((qh >> (i+0+il+QK5_0/2) << 8 ) & 0x00100)) + yl[i + 9] * ((qs[i / 2] & 0xF000) | ((qh >> (i+1+il+QK5_0/2) << 16) & 0x10000)); } return d * (sumy * -16.f + acc[0] + acc[1]); } // function for calculate inner product between half a q5_1 block and 16 floats (yl), sumy is SUM(yl[i]) // il indicates where the q5 quants begin (0 or QK5_1/4) // we assume that the yl's have been multiplied with the appropriate scale factor // that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096) inline float block_q_n_dot_y(device const block_q5_1 * qb_curr, float sumy, thread float * yl, int il) { float d = qb_curr->d; float m = qb_curr->m; float2 acc = 0.f; device const uint16_t * qs = ((device const uint16_t *)qb_curr + 4 + il/2); const uint32_t qh = *((device const uint32_t *)qb_curr->qh); for (int i = 0; i < 8; i+=2) { acc[0] += yl[i + 0] * ((qs[i / 2] & 0x000F) | ((qh >> (i+0+il ) << 4 ) & 0x00010)) + yl[i + 1] * ((qs[i / 2] & 0x0F00) | ((qh >> (i+1+il ) << 12) & 0x01000)); acc[1] += yl[i + 8] * ((qs[i / 2] & 0x00F0) | ((qh >> (i+0+il+QK5_0/2) << 8 ) & 0x00100)) + yl[i + 9] * ((qs[i / 2] & 0xF000) | ((qh >> (i+1+il+QK5_0/2) << 16) & 0x10000)); } return d * (acc[0] + acc[1]) + sumy * m; } // putting them in the kernel cause a significant performance penalty #define N_DST 4 // each SIMD group works on 4 rows #define N_SIMDGROUP 2 // number of SIMD groups in a thread group //Note: This is a template, but strictly speaking it only applies to // quantizations where the block size is 32. It also does not // guard against the number of rows not being divisible by // N_DST, so this is another explicit assumption of the implementation. 
template<typename block_q_type, int nr, int nsg, int nw> void mul_vec_q_n_f32_impl( device const void * src0, device const float * src1, device float * dst, int64_t ne00, int64_t ne01, int64_t ne02, int64_t ne10, int64_t ne12, int64_t ne0, int64_t ne1, uint r2, uint r3, uint3 tgpig, uint tiisg, uint sgitg) { const int nb = ne00/QK4_0; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * nsg + sgitg) * nr; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); device const block_q_type * x = (device const block_q_type *) src0 + offset0; device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; float yl[16]; // src1 vector cache float sumf[nr] = {0.f}; const int ix = (tiisg/2); const int il = (tiisg%2)*8; device const float * yb = y + ix * QK4_0 + il; // each thread in a SIMD group deals with half a block. for (int ib = ix; ib < nb; ib += nw/2) { float sumy = 0; for (int i = 0; i < 8; i += 2) { sumy += yb[i] + yb[i+1]; yl[i+0] = yb[i+ 0]; yl[i+1] = yb[i+ 1]/256.f; sumy += yb[i+16] + yb[i+17]; yl[i+8] = yb[i+16]/16.f; yl[i+9] = yb[i+17]/4096.f; } for (int row = 0; row < nr; row++) { sumf[row] += block_q_n_dot_y(x+ib+row*nb, sumy, yl, il); } yb += QK4_0 * 16; } for (int row = 0; row < nr; ++row) { const float tot = simd_sum(sumf[row]); if (tiisg == 0 && first_row + row < ne01) { dst[im*ne0*ne1 + r1*ne0 + first_row + row] = tot; } } } kernel void kernel_mul_mv_q4_0_f32( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { mul_vec_q_n_f32_impl<block_q4_0, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,r2,r3,tgpig,tiisg,sgitg); } kernel void kernel_mul_mv_q4_1_f32( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { mul_vec_q_n_f32_impl<block_q4_1, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,r2,r3,tgpig,tiisg,sgitg); } kernel void kernel_mul_mv_q5_0_f32( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & 
r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { mul_vec_q_n_f32_impl<block_q5_0, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,r2,r3,tgpig,tiisg,sgitg); } kernel void kernel_mul_mv_q5_1_f32( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { mul_vec_q_n_f32_impl<block_q5_1, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,r2,r3,tgpig,tiisg,sgitg); } #define NB_Q8_0 8 void kernel_mul_mv_q8_0_f32_impl( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne10, constant int64_t & ne12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { const int nr = N_DST; const int nsg = N_SIMDGROUP; const int nw = N_SIMDWIDTH; const int nb = ne00/QK8_0; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * nsg + sgitg) * nr; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); device const block_q8_0 * x = (device const block_q8_0 *) src0 + offset0; device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; float yl[NB_Q8_0]; float sumf[nr]={0.f}; const int ix = tiisg/4; const int il = tiisg%4; device const float * yb = y + ix * QK8_0 + NB_Q8_0*il; // each thread in a SIMD group deals with NB_Q8_0 quants at a time for (int ib = ix; ib < nb; ib += nw/4) { for (int i = 0; i < NB_Q8_0; ++i) { yl[i] = yb[i]; } for (int row = 0; row < nr; row++) { device const int8_t * qs = x[ib+row*nb].qs + NB_Q8_0*il; float sumq = 0.f; for (int iq = 0; iq < NB_Q8_0; ++iq) { sumq += qs[iq] * yl[iq]; } sumf[row] += sumq*x[ib+row*nb].d; } yb += NB_Q8_0 * nw; } for (int row = 0; row < nr; ++row) { const float tot = simd_sum(sumf[row]); if (tiisg == 0 && first_row + row < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + row] = tot; } } } [[host_name("kernel_mul_mv_q8_0_f32")]] kernel void kernel_mul_mv_q8_0_f32( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_q8_0_f32_impl(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,r2,r3,tgpig,tiisg,sgitg); } #define 
N_F32_F32 4 void kernel_mul_mv_f32_f32_impl( device const char * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]]) { const int64_t r0 = tgpig.x; const int64_t rb = tgpig.y*N_F32_F32; const int64_t im = tgpig.z; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb02*ne02; device const float * x = (device const float *) (src0 + offset0); if (ne00 < 128) { for (int row = 0; row < N_F32_F32; ++row) { int r1 = rb + row; if (r1 >= ne11) { break; } device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12); float sumf = 0; for (int i = tiisg; i < ne00; i += 32) { sumf += (float) x[i] * (float) y[i]; } float all_sum = simd_sum(sumf); if (tiisg == 0) { dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } else { device const float4 * x4 = (device const float4 *)x; for (int row = 0; row < N_F32_F32; ++row) { int r1 = rb + row; if (r1 >= ne11) { break; } device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12); device const float4 * y4 = (device const float4 *) y; float sumf = 0; for (int i = tiisg; i < ne00/4; i += 32) { for (int k = 0; k < 4; ++k) sumf += (float) x4[i][k] * y4[i][k]; } float all_sum = simd_sum(sumf); if (tiisg == 0) { for (int i = 4*(ne00/4); i < ne00; ++i) all_sum += (float) x[i] * y[i]; dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } } [[host_name("kernel_mul_mv_f32_f32")]] kernel void kernel_mul_mv_f32_f32( device const char * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]]) { kernel_mul_mv_f32_f32_impl(src0, src1, dst, ne00, ne01, ne02, nb00, nb01, nb02, ne10, ne11, ne12, nb10, nb11, nb12, ne0, ne1, r2, r3, tgpig, tiisg); } #define N_F16_F16 4 kernel void kernel_mul_mv_f16_f16( device const char * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]]) { const int64_t r0 = tgpig.x; const int64_t rb = tgpig.y*N_F16_F16; const int64_t im = tgpig.z; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb02*ne02; device const half * x = (device const half *) (src0 + offset0); if (ne00 < 128) { for (int row = 0; row < N_F16_F16; 
++row) { int r1 = rb + row; if (r1 >= ne11) { break; } device const half * y = (device const half *) (src1 + r1*nb11 + im*nb12); float sumf = 0; for (int i = tiisg; i < ne00; i += 32) { sumf += (half) x[i] * (half) y[i]; } float all_sum = simd_sum(sumf); if (tiisg == 0) { dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } else { device const half4 * x4 = (device const half4 *)x; for (int row = 0; row < N_F16_F16; ++row) { int r1 = rb + row; if (r1 >= ne11) { break; } device const half * y = (device const half *) (src1 + r1*nb11 + im*nb12); device const half4 * y4 = (device const half4 *) y; float sumf = 0; for (int i = tiisg; i < ne00/4; i += 32) { for (int k = 0; k < 4; ++k) sumf += (half) x4[i][k] * y4[i][k]; } float all_sum = simd_sum(sumf); if (tiisg == 0) { for (int i = 4*(ne00/4); i < ne00; ++i) all_sum += (half) x[i] * y[i]; dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } } void kernel_mul_mv_f16_f32_1row_impl( device const char * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]]) { const int64_t r0 = tgpig.x; const int64_t r1 = tgpig.y; const int64_t im = tgpig.z; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb02*ne02; device const half * x = (device const half *) (src0 + offset0); device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12); float sumf = 0; if (ne00 < 128) { for (int i = tiisg; i < ne00; i += 32) { sumf += (float) x[i] * (float) y[i]; } float all_sum = simd_sum(sumf); if (tiisg == 0) { dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } else { device const half4 * x4 = (device const half4 *) x; device const float4 * y4 = (device const float4 *) y; for (int i = tiisg; i < ne00/4; i += 32) { for (int k = 0; k < 4; ++k) sumf += (float)x4[i][k] * y4[i][k]; } float all_sum = simd_sum(sumf); if (tiisg == 0) { for (int i = 4*(ne00/4); i < ne00; ++i) all_sum += (float) x[i] * y[i]; dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } [[host_name("kernel_mul_mv_f16_f32_1row")]] kernel void kernel_mul_mv_f16_f32_1row( device const char * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]]) { kernel_mul_mv_f16_f32_1row_impl(src0, src1, dst, ne00, ne01, ne02, nb00, nb01, nb02, ne10, ne11, ne12, nb10, nb11, nb12, ne0, ne1, r2, r3, tgpig, tiisg); } #define N_F16_F32 4 void kernel_mul_mv_f16_f32_impl( device const char * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant 
int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]]) { const int64_t r0 = tgpig.x; const int64_t rb = tgpig.y*N_F16_F32; const int64_t im = tgpig.z; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb02*ne02; device const half * x = (device const half *) (src0 + offset0); if (ne00 < 128) { for (int row = 0; row < N_F16_F32; ++row) { int r1 = rb + row; if (r1 >= ne11) { break; } device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12); float sumf = 0; for (int i = tiisg; i < ne00; i += 32) { sumf += (float) x[i] * (float) y[i]; } float all_sum = simd_sum(sumf); if (tiisg == 0) { dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } else { device const half4 * x4 = (device const half4 *)x; for (int row = 0; row < N_F16_F32; ++row) { int r1 = rb + row; if (r1 >= ne11) { break; } device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12); device const float4 * y4 = (device const float4 *) y; float sumf = 0; for (int i = tiisg; i < ne00/4; i += 32) { for (int k = 0; k < 4; ++k) sumf += (float) x4[i][k] * y4[i][k]; } float all_sum = simd_sum(sumf); if (tiisg == 0) { for (int i = 4*(ne00/4); i < ne00; ++i) all_sum += (float) x[i] * y[i]; dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } } [[host_name("kernel_mul_mv_f16_f32")]] kernel void kernel_mul_mv_f16_f32( device const char * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]]) { kernel_mul_mv_f16_f32_impl(src0, src1, dst, ne00, ne01, ne02, nb00, nb01, nb02, ne10, ne11, ne12, nb10, nb11, nb12, ne0, ne1, r2, r3, tgpig, tiisg); } // Assumes row size (ne00) is a multiple of 4 kernel void kernel_mul_mv_f16_f32_l4( device const char * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]]) { const int nrows = ne11; const int64_t r0 = tgpig.x; const int64_t im = tgpig.z; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb02*ne02; device const half4 * x4 = (device const half4 *) (src0 + offset0); for (int r1 = 0; r1 < nrows; ++r1) { device const float4 * y4 = (device const float4 *) (src1 + r1*nb11 + im*nb12); float sumf = 0; for (int i = tiisg; i < ne00/4; i += 32) { for (int k = 0; k < 4; ++k) sumf += (float) x4[i][k] * y4[i][k]; } float all_sum = simd_sum(sumf); if (tiisg == 0) { dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } kernel void kernel_alibi_f32( 
device const float * src0, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, constant float & m0, constant float & m1, constant int & n_heads_log2_floor, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig[2]; const int64_t i02 = tgpig[1]; const int64_t i01 = tgpig[0]; const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; const int64_t i3 = n / (ne2*ne1*ne0); const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; //const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0); const int64_t k = i3*ne3 + i2; float m_k; if (k < n_heads_log2_floor) { m_k = pow(m0, k + 1); } else { m_k = pow(m1, 2 * (k - n_heads_log2_floor) + 1); } device char * dst_row = (device char *) dst + i3*nb3 + i2*nb2 + i1*nb1; device const char * src_row = (device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01; for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) { const float src_v = *(device float *)(src_row + i00*nb00); device float * dst_v = (device float *)(dst_row + i00*nb0); *dst_v = i00 * m_k + src_v; } } static float rope_yarn_ramp(const float low, const float high, const int i0) { const float y = (i0 / 2 - low) / max(0.001f, high - low); return 1.0f - min(1.0f, max(0.0f, y)); } // YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn // MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng. 
static void rope_yarn( float theta_extrap, float freq_scale, float corr_dims[2], int64_t i0, float ext_factor, float mscale, thread float * cos_theta, thread float * sin_theta ) { // Get n-d rotational scaling corrected for extrapolation float theta_interp = freq_scale * theta_extrap; float theta = theta_interp; if (ext_factor != 0.0f) { float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor; theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; // Get n-d magnitude scaling corrected for interpolation mscale *= 1.0f + 0.1f * log(1.0f / freq_scale); } *cos_theta = cos(theta) * mscale; *sin_theta = sin(theta) * mscale; } // Apparently solving `n_rot = 2pi * x * base^((2 * max_pos_emb) / n_dims)` for x, we get // `corr_fac(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2pi)) / (2 * log(base))` static float rope_yarn_corr_factor(int n_dims, int n_orig_ctx, float n_rot, float base) { return n_dims * log(n_orig_ctx / (n_rot * 2 * M_PI_F)) / (2 * log(base)); } static void rope_yarn_corr_dims( int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2] ) { // start and end correction dims dims[0] = max(0.0f, floor(rope_yarn_corr_factor(n_dims, n_orig_ctx, beta_fast, freq_base))); dims[1] = min(n_dims - 1.0f, ceil(rope_yarn_corr_factor(n_dims, n_orig_ctx, beta_slow, freq_base))); } typedef void (rope_t)( device const void * src0, device const int32_t * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, constant int & n_past, constant int & n_dims, constant int & mode, constant int & n_orig_ctx, constant float & freq_base, constant float & freq_scale, constant float & ext_factor, constant float & attn_factor, constant float & beta_fast, constant float & beta_slow, uint tiitg[[thread_index_in_threadgroup]], uint3 tptg[[threads_per_threadgroup]], uint3 tgpig[[threadgroup_position_in_grid]]); template<typename T> kernel void kernel_rope( device const void * src0, device const int32_t * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, constant int & n_past, constant int & n_dims, constant int & mode, constant int & n_orig_ctx, constant float & freq_base, constant float & freq_scale, constant float & ext_factor, constant float & attn_factor, constant float & beta_fast, constant float & beta_slow, uint tiitg[[thread_index_in_threadgroup]], uint3 tptg[[threads_per_threadgroup]], uint3 tgpig[[threadgroup_position_in_grid]]) { const int64_t i3 = tgpig[2]; const int64_t i2 = tgpig[1]; const int64_t i1 = tgpig[0]; const bool is_neox = mode & 2; float corr_dims[2]; rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims); device const int32_t * pos = src1; const int64_t p = pos[i2]; const float theta_0 = (float)p; const float inv_ndims = -1.f/n_dims; if (!is_neox) { for (int64_t i0 = 
2*tiitg; i0 < ne0; i0 += 2*tptg.x) { const float theta = theta_0 * pow(freq_base, inv_ndims*i0); float cos_theta, sin_theta; rope_yarn(theta, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta); device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); const T x0 = src[0]; const T x1 = src[1]; dst_data[0] = x0*cos_theta - x1*sin_theta; dst_data[1] = x0*sin_theta + x1*cos_theta; } } else { for (int64_t ic = 2*tiitg; ic < ne0; ic += 2*tptg.x) { if (ic < n_dims) { const int64_t ib = 0; // simplified from `(ib * n_dims + ic) * inv_ndims` const float cur_rot = inv_ndims*ic - ib; const float theta = theta_0 * pow(freq_base, cur_rot); float cos_theta, sin_theta; rope_yarn(theta, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor, &cos_theta, &sin_theta); const int64_t i0 = ib*n_dims + ic/2; device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); const float x0 = src[0]; const float x1 = src[n_dims/2]; dst_data[0] = x0*cos_theta - x1*sin_theta; dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta; } else { const int64_t i0 = ic; device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); dst_data[0] = src[0]; dst_data[1] = src[1]; } } } } template [[host_name("kernel_rope_f32")]] kernel rope_t kernel_rope<float>; template [[host_name("kernel_rope_f16")]] kernel rope_t kernel_rope<half>; kernel void kernel_im2col_f16( device const float * x, device half * dst, constant int32_t & ofs0, constant int32_t & ofs1, constant int32_t & IW, constant int32_t & IH, constant int32_t & CHW, constant int32_t & s0, constant int32_t & s1, constant int32_t & p0, constant int32_t & p1, constant int32_t & d0, constant int32_t & d1, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int32_t iiw = tgpig[2] * s0 + tpitg[2] * d0 - p0; const int32_t iih = tgpig[1] * s1 + tpitg[1] * d1 - p1; const int32_t offset_dst = (tpitg[0] * tgpg[1] * tgpg[2] + tgpig[1] * tgpg[2] + tgpig[2]) * CHW + (tgpig[0] * (ntg[1] * ntg[2]) + tpitg[1] * ntg[2] + tpitg[2]); if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { dst[offset_dst] = 0.0f; } else { const int32_t offset_src = tpitg[0] * ofs0 + tgpig[0] * ofs1; dst[offset_dst] = x[offset_src + iih * IW + iiw]; } } kernel void kernel_upscale_f32( device const char * src0, device char * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, constant int32_t & sf, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i3 = tgpig.z; const int64_t i2 = tgpig.y; const int64_t i1 = tgpig.x; const int64_t i03 = i3; const int64_t i02 = i2; const int64_t i01 = i1/sf; device const float * src0_ptr = (device 
const float *) (src0 + i03*nb03 + i02*nb02 + i01*nb01); device float * dst_ptr = (device float *) (dst + i3*nb3 + i2*nb2 + i1*nb1); for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { dst_ptr[i0] = src0_ptr[i0/sf]; } } kernel void kernel_pad_f32( device const char * src0, device char * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i3 = tgpig.z; const int64_t i2 = tgpig.y; const int64_t i1 = tgpig.x; const int64_t i03 = i3; const int64_t i02 = i2; const int64_t i01 = i1; device const float * src0_ptr = (device const float *) (src0 + i03*nb03 + i02*nb02 + i01*nb01); device float * dst_ptr = (device float *) (dst + i3*nb3 + i2*nb2 + i1*nb1); if (i1 < ne01 && i2 < ne02 && i3 < ne03) { for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { if (i0 < ne00) { dst_ptr[i0] = src0_ptr[i0]; } else { dst_ptr[i0] = 0.0f; } } return; } for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { dst_ptr[i0] = 0.0f; } } // bitonic sort implementation following the CUDA kernels as reference typedef void (argsort_t)( device const float * x, device int32_t * dst, constant int64_t & ncols, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]]); template<ggml_sort_order order> kernel void kernel_argsort_f32_i32( device const float * x, device int32_t * dst, constant int64_t & ncols, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]]) { // bitonic sort int col = tpitg[0]; int row = tgpig[1]; if (col >= ncols) return; device const float * x_row = x + row * ncols; device int32_t * dst_row = dst + row * ncols; // initialize indices if (col < ncols) { dst_row[col] = col; } threadgroup_barrier(mem_flags::mem_threadgroup); for (int k = 2; k <= ncols; k *= 2) { for (int j = k / 2; j > 0; j /= 2) { int ixj = col ^ j; if (ixj > col) { if ((col & k) == 0) { if (order == GGML_SORT_ASC ? x_row[dst_row[col]] > x_row[dst_row[ixj]] : x_row[dst_row[col]] < x_row[dst_row[ixj]]) { SWAP(dst_row[col], dst_row[ixj]); } } else { if (order == GGML_SORT_ASC ? x_row[dst_row[col]] < x_row[dst_row[ixj]] : x_row[dst_row[col]] > x_row[dst_row[ixj]]) { SWAP(dst_row[col], dst_row[ixj]); } } } threadgroup_barrier(mem_flags::mem_threadgroup); } } } template [[host_name("kernel_argsort_f32_i32_asc")]] kernel argsort_t kernel_argsort_f32_i32<GGML_SORT_ASC>; template [[host_name("kernel_argsort_f32_i32_desc")]] kernel argsort_t kernel_argsort_f32_i32<GGML_SORT_DESC>; kernel void kernel_leaky_relu_f32( device const float * src0, device float * dst, constant float & slope, uint tpig[[thread_position_in_grid]]) { dst[tpig] = src0[tpig] > 0.0f ? 
src0[tpig] : src0[tpig] * slope; } kernel void kernel_cpy_f16_f16( device const half * src0, device half * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig[2]; const int64_t i02 = tgpig[1]; const int64_t i01 = tgpig[0]; const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; const int64_t i3 = n / (ne2*ne1*ne0); const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0); device half * dst_data = (device half *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) { device const half * src = (device half *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); dst_data[i00] = src[0]; } } kernel void kernel_cpy_f16_f32( device const half * src0, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig[2]; const int64_t i02 = tgpig[1]; const int64_t i01 = tgpig[0]; const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; const int64_t i3 = n / (ne2*ne1*ne0); const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0); device float * dst_data = (device float *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) { device const half * src = (device half *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); dst_data[i00] = src[0]; } } kernel void kernel_cpy_f32_f16( device const float * src0, device half * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig[2]; const int64_t i02 = tgpig[1]; const int64_t i01 = tgpig[0]; const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; const int64_t i3 = n / (ne2*ne1*ne0); const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - 
i1*ne0); device half * dst_data = (device half *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) { device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); dst_data[i00] = src[0]; } } kernel void kernel_cpy_f32_f32( device const float * src0, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig[2]; const int64_t i02 = tgpig[1]; const int64_t i01 = tgpig[0]; const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; const int64_t i3 = n / (ne2*ne1*ne0); const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0); device float * dst_data = (device float *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) { device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); dst_data[i00] = src[0]; } } kernel void kernel_cpy_f32_q8_0( device const float * src0, device void * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig[2]; const int64_t i02 = tgpig[1]; const int64_t i01 = tgpig[0]; const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; const int64_t i3 = n / (ne2*ne1*ne0); const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0)/QK8_0; device block_q8_0 * dst_data = (device block_q8_0 *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); for (int64_t i00 = tpitg.x*QK8_0; i00 < ne00; i00 += ntg.x*QK8_0) { device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); float amax = 0.0f; // absolute max for (int j = 0; j < QK8_0; j++) { const float v = src[j]; amax = MAX(amax, fabs(v)); } const float d = amax / ((1 << 7) - 1); const float id = d ? 
1.0f/d : 0.0f; dst_data[i00/QK8_0].d = d; for (int j = 0; j < QK8_0; ++j) { const float x0 = src[j]*id; dst_data[i00/QK8_0].qs[j] = round(x0); } } } kernel void kernel_cpy_f32_q4_0( device const float * src0, device void * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig[2]; const int64_t i02 = tgpig[1]; const int64_t i01 = tgpig[0]; const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; const int64_t i3 = n / (ne2*ne1*ne0); const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0)/QK4_0; device block_q4_0 * dst_data = (device block_q4_0 *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); for (int64_t i00 = tpitg.x*QK4_0; i00 < ne00; i00 += ntg.x*QK4_0) { device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); float amax = 0.0f; // absolute max float max = 0.0f; for (int j = 0; j < QK4_0; j++) { const float v = src[j]; if (amax < fabs(v)) { amax = fabs(v); max = v; } } const float d = max / -8; const float id = d ? 1.0f/d : 0.0f; dst_data[i00/QK4_0].d = d; for (int j = 0; j < QK4_0/2; ++j) { const float x0 = src[0 + j]*id; const float x1 = src[QK4_0/2 + j]*id; const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f)); const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f)); dst_data[i00/QK4_0].qs[j] = xi0; dst_data[i00/QK4_0].qs[j] |= xi1 << 4; } } } kernel void kernel_cpy_f32_q4_1( device const float * src0, device void * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig[2]; const int64_t i02 = tgpig[1]; const int64_t i01 = tgpig[0]; const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; const int64_t i3 = n / (ne2*ne1*ne0); const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0)/QK4_1; device block_q4_1 * dst_data = (device block_q4_1 *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); for (int64_t i00 = tpitg.x*QK4_1; i00 < ne00; i00 += ntg.x*QK4_1) { device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); float min = FLT_MAX; float max = -FLT_MAX; for (int j = 0; j < QK4_1; j++) { const float v = src[j]; if (min > v) min = v; if (max < v) max = v; } const float d = (max - min) / ((1 << 4) - 1); const float id = d ? 
1.0f/d : 0.0f; dst_data[i00/QK4_1].d = d; dst_data[i00/QK4_1].m = min; for (int j = 0; j < QK4_1/2; ++j) { const float x0 = (src[0 + j] - min)*id; const float x1 = (src[QK4_1/2 + j] - min)*id; const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f)); const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f)); dst_data[i00/QK4_1].qs[j] = xi0; dst_data[i00/QK4_1].qs[j] |= xi1 << 4; } } } kernel void kernel_concat( device const char * src0, device const char * src1, device char * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant uint64_t & nb13, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig.z; const int64_t i02 = tgpig.y; const int64_t i01 = tgpig.x; const int64_t i13 = i03 % ne13; const int64_t i12 = i02 % ne12; const int64_t i11 = i01 % ne11; device const char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01 + tpitg.x*nb00; device const char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11 + tpitg.x*nb10; device char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1 + tpitg.x*nb0; for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { if (i02 < ne02) { ((device float *)dst_ptr)[0] = ((device float *)src0_ptr)[0]; src0_ptr += ntg.x*nb00; } else { ((device float *)dst_ptr)[0] = ((device float *)src1_ptr)[0]; src1_ptr += ntg.x*nb10; } dst_ptr += ntg.x*nb0; } } //============================================ k-quants ====================================================== #ifndef QK_K #define QK_K 256 #else static_assert(QK_K == 256 || QK_K == 64, "QK_K must be 256 or 64"); #endif #if QK_K == 256 #define K_SCALE_SIZE 12 #else #define K_SCALE_SIZE 4 #endif typedef struct { uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits uint8_t qs[QK_K/4]; // quants half d; // super-block scale for quantized scales half dmin; // super-block scale for quantized mins } block_q2_K; // 84 bytes / block typedef struct { uint8_t hmask[QK_K/8]; // quants - high bit uint8_t qs[QK_K/4]; // quants - low 2 bits #if QK_K == 64 uint8_t scales[2]; #else uint8_t scales[K_SCALE_SIZE]; // scales, quantized with 6 bits #endif half d; // super-block scale } block_q3_K; #if QK_K == 64 typedef struct { half d[2]; // super-block scales/mins uint8_t scales[2]; uint8_t qs[QK_K/2]; // 4-bit quants } block_q4_K; #else typedef struct { half d; // super-block scale for quantized scales half dmin; // super-block scale for quantized mins uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits uint8_t qs[QK_K/2]; // 4--bit quants } block_q4_K; #endif #if QK_K == 64 typedef struct { half d; // super-block scales/mins int8_t scales[QK_K/16]; // 8-bit block scales uint8_t qh[QK_K/8]; // quants, high bit uint8_t qs[QK_K/2]; // quants, low 4 bits } block_q5_K; #else typedef struct { half d; // super-block scale for quantized scales half dmin; // super-block scale for quantized mins uint8_t scales[3*QK_K/64]; // scales and mins, quantized with 6 bits uint8_t qh[QK_K/8]; // quants, high bit uint8_t 
qs[QK_K/2]; // quants, low 4 bits } block_q5_K; // 176 bytes / block #endif typedef struct { uint8_t ql[QK_K/2]; // quants, lower 4 bits uint8_t qh[QK_K/4]; // quants, upper 2 bits int8_t scales[QK_K/16]; // scales, quantized with 8 bits half d; // super-block scale } block_q6_K; // 210 bytes / block //====================================== dot products ========================= void kernel_mul_mv_q2_K_f32_impl( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne10, constant int64_t & ne12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { const int nb = ne00/QK_K; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST; const int ib_row = first_row * nb; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); device const block_q2_K * x = (device const block_q2_K *) src0 + ib_row + offset0; device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; float yl[32]; float sumf[N_DST]={0.f}, all_sum; const int step = sizeof(block_q2_K) * nb; #if QK_K == 256 const int ix = tiisg/8; // 0...3 const int it = tiisg%8; // 0...7 const int iq = it/4; // 0 or 1 const int ir = it%4; // 0...3 const int is = (8*ir)/16;// 0 or 1 device const float * y4 = y + ix * QK_K + 128 * iq + 8 * ir; for (int ib = ix; ib < nb; ib += 4) { float4 sumy = {0.f, 0.f, 0.f, 0.f}; for (int i = 0; i < 8; ++i) { yl[i+ 0] = y4[i+ 0]; sumy[0] += yl[i+ 0]; yl[i+ 8] = y4[i+32]; sumy[1] += yl[i+ 8]; yl[i+16] = y4[i+64]; sumy[2] += yl[i+16]; yl[i+24] = y4[i+96]; sumy[3] += yl[i+24]; } device const uint8_t * sc = (device const uint8_t *)x[ib].scales + 8*iq + is; device const uint16_t * qs = (device const uint16_t *)x[ib].qs + 16 * iq + 4 * ir; device const half * dh = &x[ib].d; for (int row = 0; row < N_DST; row++) { float4 acc1 = {0.f, 0.f, 0.f, 0.f}; float4 acc2 = {0.f, 0.f, 0.f, 0.f}; for (int i = 0; i < 8; i += 2) { acc1[0] += yl[i+ 0] * (qs[i/2] & 0x0003); acc2[0] += yl[i+ 1] * (qs[i/2] & 0x0300); acc1[1] += yl[i+ 8] * (qs[i/2] & 0x000c); acc2[1] += yl[i+ 9] * (qs[i/2] & 0x0c00); acc1[2] += yl[i+16] * (qs[i/2] & 0x0030); acc2[2] += yl[i+17] * (qs[i/2] & 0x3000); acc1[3] += yl[i+24] * (qs[i/2] & 0x00c0); acc2[3] += yl[i+25] * (qs[i/2] & 0xc000); } float dall = dh[0]; float dmin = dh[1] * 1.f/16.f; sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc2[0]) * (sc[0] & 0xF) * 1.f/ 1.f + (acc1[1] + 1.f/256.f * acc2[1]) * (sc[2] & 0xF) * 1.f/ 4.f + (acc1[2] + 1.f/256.f * acc2[2]) * (sc[4] & 0xF) * 1.f/16.f + (acc1[3] + 1.f/256.f * acc2[3]) * (sc[6] & 0xF) * 1.f/64.f) - dmin * (sumy[0] * (sc[0] & 0xF0) + sumy[1] * (sc[2] & 0xF0) + sumy[2] * (sc[4] & 0xF0) + sumy[3] * (sc[6] & 0xF0)); qs += step/2; sc += step; dh += step/2; } y4 += 4 * QK_K; } #else const int ix = tiisg/2; // 0...15 const int it = tiisg%2; // 0...1 device const float * y4 = y + ix * QK_K + 8 * it; for (int ib = ix; ib < nb; ib += 16) { float4 sumy = {0.f, 0.f, 0.f, 0.f}; for (int i = 0; i < 8; ++i) { yl[i+ 0] = y4[i+ 0]; sumy[0] += yl[i+ 0]; yl[i+ 8] = y4[i+16]; sumy[1] += yl[i+ 8]; yl[i+16] = y4[i+32]; sumy[2] += yl[i+16]; yl[i+24] = y4[i+48]; sumy[3] += yl[i+24]; } device const uint8_t * sc = (device const uint8_t *)x[ib].scales; device 
const uint16_t * qs = (device const uint16_t *)x[ib].qs + 4 * it; device const half * dh = &x[ib].d; for (int row = 0; row < N_DST; row++) { float4 acc1 = {0.f, 0.f, 0.f, 0.f}; float4 acc2 = {0.f, 0.f, 0.f, 0.f}; for (int i = 0; i < 8; i += 2) { acc1[0] += yl[i+ 0] * (qs[i/2] & 0x0003); acc2[0] += yl[i+ 1] * (qs[i/2] & 0x0300); acc1[1] += yl[i+ 8] * (qs[i/2] & 0x000c); acc2[1] += yl[i+ 9] * (qs[i/2] & 0x0c00); acc1[2] += yl[i+16] * (qs[i/2] & 0x0030); acc2[2] += yl[i+17] * (qs[i/2] & 0x3000); acc1[3] += yl[i+24] * (qs[i/2] & 0x00c0); acc2[3] += yl[i+25] * (qs[i/2] & 0xc000); } float dall = dh[0]; float dmin = dh[1]; sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc2[0]) * (sc[0] & 0xF) * 1.f/ 1.f + (acc1[1] + 1.f/256.f * acc2[1]) * (sc[1] & 0xF) * 1.f/ 4.f + (acc1[2] + 1.f/256.f * acc2[2]) * (sc[2] & 0xF) * 1.f/16.f + (acc1[3] + 1.f/256.f * acc2[3]) * (sc[3] & 0xF) * 1.f/64.f) - dmin * (sumy[0] * (sc[0] >> 4) + sumy[1] * (sc[1] >> 4) + sumy[2] * (sc[2] >> 4) + sumy[3] * (sc[3] >> 4)); qs += step/2; sc += step; dh += step/2; } y4 += 16 * QK_K; } #endif for (int row = 0; row < N_DST; ++row) { all_sum = simd_sum(sumf[row]); if (tiisg == 0) { dst[r1*ne0 + im*ne0*ne1 + first_row + row] = all_sum; } } } [[host_name("kernel_mul_mv_q2_K_f32")]] kernel void kernel_mul_mv_q2_K_f32( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_q2_K_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } #if QK_K == 256 void kernel_mul_mv_q3_K_f32_impl( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne10, constant int64_t & ne12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { const int nb = ne00/QK_K; const int64_t r0 = tgpig.x; const int64_t r1 = tgpig.y; const int64_t im = tgpig.z; const int first_row = (r0 * N_SIMDGROUP + sgitg) * 2; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); device const block_q3_K * x = (device const block_q3_K *) src0 + first_row*nb + offset0; device const float * yy = (device const float *) src1 + r1*ne10 + im*ne00*ne1; float yl[32]; //const uint16_t kmask1 = 0x3030; //const uint16_t kmask2 = 0x0f0f; const int tid = tiisg/4; const int ix = tiisg%4; const int ip = tid/4; // 0 or 1 const int il = 2*((tid%4)/2); // 0 or 2 const int ir = tid%2; const int n = 8; const int l0 = n*ir; // One would think that the Metal compiler would figure out that ip and il can only have // 4 possible states, and optimize accordingly. Well, no. It needs help, and we do it // with these two tales. 
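// Illustrative reading aid for the two lookup tables below (not used by the code itself):
//   ip = 0, il = 0  ->  hm = mm[0] = {0x0001, 0x0100, 0x0002, 0x0200}, i.e. the high-bit masks for the
//   even/odd elements in the low/high byte of each 16-bit hmask word, and qm[il/2] = qm[0] picks the
//   matching 2-bit quant fields {0x0003, 0x0300, 0x000c, 0x0c00}. Products from odd elements stay
//   shifted into the high byte, which is why they are folded back with the 1.f/256.f factor further down.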
// // Possible masks for the high bit const ushort4 mm[4] = {{0x0001, 0x0100, 0x0002, 0x0200}, // ip = 0, il = 0 {0x0004, 0x0400, 0x0008, 0x0800}, // ip = 0, il = 2 {0x0010, 0x1000, 0x0020, 0x2000}, // ip = 1, il = 0 {0x0040, 0x4000, 0x0080, 0x8000}}; // ip = 1, il = 2 // Possible masks for the low 2 bits const int4 qm[2] = {{0x0003, 0x0300, 0x000c, 0x0c00}, {0x0030, 0x3000, 0x00c0, 0xc000}}; const ushort4 hm = mm[2*ip + il/2]; const int shift = 2*il; const float v1 = il == 0 ? 4.f : 64.f; const float v2 = 4.f * v1; const uint16_t s_shift1 = 4*ip; const uint16_t s_shift2 = s_shift1 + il; const int q_offset = 32*ip + l0; const int y_offset = 128*ip + 32*il + l0; const int step = sizeof(block_q3_K) * nb / 2; device const float * y1 = yy + ix*QK_K + y_offset; uint32_t scales32, aux32; thread uint16_t * scales16 = (thread uint16_t *)&scales32; thread const int8_t * scales = (thread const int8_t *)&scales32; float sumf1[2] = {0.f}; float sumf2[2] = {0.f}; for (int i = ix; i < nb; i += 4) { for (int l = 0; l < 8; ++l) { yl[l+ 0] = y1[l+ 0]; yl[l+ 8] = y1[l+16]; yl[l+16] = y1[l+32]; yl[l+24] = y1[l+48]; } device const uint16_t * q = (device const uint16_t *)(x[i].qs + q_offset); device const uint16_t * h = (device const uint16_t *)(x[i].hmask + l0); device const uint16_t * a = (device const uint16_t *)(x[i].scales); device const half * dh = &x[i].d; for (int row = 0; row < 2; ++row) { const float d_all = (float)dh[0]; scales16[0] = a[4]; scales16[1] = a[5]; aux32 = ((scales32 >> s_shift2) << 4) & 0x30303030; scales16[0] = a[il+0]; scales16[1] = a[il+1]; scales32 = ((scales32 >> s_shift1) & 0x0f0f0f0f) | aux32; float s1 = 0, s2 = 0, s3 = 0, s4 = 0, s5 = 0, s6 = 0; for (int l = 0; l < n; l += 2) { const int32_t qs = q[l/2]; s1 += yl[l+0] * (qs & qm[il/2][0]); s2 += yl[l+1] * (qs & qm[il/2][1]); s3 += ((h[l/2] & hm[0]) ? 0.f : yl[l+0]) + ((h[l/2] & hm[1]) ? 0.f : yl[l+1]); s4 += yl[l+16] * (qs & qm[il/2][2]); s5 += yl[l+17] * (qs & qm[il/2][3]); s6 += ((h[l/2] & hm[2]) ? 0.f : yl[l+16]) + ((h[l/2] & hm[3]) ? 0.f : yl[l+17]); } float d1 = d_all * (s1 + 1.f/256.f * s2 - s3*v1); float d2 = d_all * (s4 + 1.f/256.f * s5 - s6*v2); sumf1[row] += d1 * (scales[0] - 32); sumf2[row] += d2 * (scales[2] - 32); s1 = s2 = s3 = s4 = s5 = s6 = 0; for (int l = 0; l < n; l += 2) { const int32_t qs = q[l/2+8]; s1 += yl[l+8] * (qs & qm[il/2][0]); s2 += yl[l+9] * (qs & qm[il/2][1]); s3 += ((h[l/2+8] & hm[0]) ? 0.f : yl[l+8]) + ((h[l/2+8] & hm[1]) ? 0.f : yl[l+9]); s4 += yl[l+24] * (qs & qm[il/2][2]); s5 += yl[l+25] * (qs & qm[il/2][3]); s6 += ((h[l/2+8] & hm[2]) ? 0.f : yl[l+24]) + ((h[l/2+8] & hm[3]) ? 
0.f : yl[l+25]); } d1 = d_all * (s1 + 1.f/256.f * s2 - s3*v1); d2 = d_all * (s4 + 1.f/256.f * s5 - s6*v2); sumf1[row] += d1 * (scales[1] - 32); sumf2[row] += d2 * (scales[3] - 32); q += step; h += step; a += step; dh += step; } y1 += 4 * QK_K; } for (int row = 0; row < 2; ++row) { const float sumf = (sumf1[row] + 0.25f * sumf2[row]) / (1 << shift); sumf1[row] = simd_sum(sumf); } if (tiisg == 0) { for (int row = 0; row < 2; ++row) { dst[r1*ne0 + im*ne0*ne1 + first_row + row] = sumf1[row]; } } } #else void kernel_mul_mv_q3_K_f32_impl( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne10, constant int64_t & ne12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { const int nb = ne00/QK_K; const int64_t r0 = tgpig.x; const int64_t r1 = tgpig.y; const int64_t im = tgpig.z; const int row = 2 * r0 + sgitg; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); device const block_q3_K * x = (device const block_q3_K *) src0 + row*nb + offset0; device const float * yy = (device const float *) src1 + r1*ne10 + im*ne00*ne1; const int ix = tiisg/4; const int il = 4 * (tiisg%4);// 0, 4, 8, 12 const int iq = il/8; // 0, 0, 1, 1 const int in = il%8; // 0, 4, 0, 4 float2 sum = {0.f, 0.f}; for (int i = ix; i < nb; i += 8) { const float d_all = (float)(x[i].d); device const uint16_t * q = (device const uint16_t *)(x[i].qs + il); device const uint16_t * h = (device const uint16_t *)(x[i].hmask + in); device const uint16_t * s = (device const uint16_t *)(x[i].scales); device const float * y = yy + i * QK_K + il; const float d1 = d_all * ((int32_t)(s[0] & 0x000F) - 8); const float d2 = d_all * ((int32_t)(s[0] & 0x00F0) - 128) * 1.f/64.f; const float d3 = d_all * ((int32_t)(s[0] & 0x0F00) - 2048) * 1.f/4096.f; const float d4 = d_all * ((int32_t)(s[0] & 0xF000) - 32768) * 1.f/262144.f; for (int l = 0; l < 4; l += 2) { const uint16_t hm = h[l/2] >> iq; sum[0] += y[l+ 0] * d1 * ((int32_t)(q[l/2] & 0x0003) - ((hm & 0x0001) ? 0 : 4)) + y[l+16] * d2 * ((int32_t)(q[l/2] & 0x000c) - ((hm & 0x0004) ? 0 : 16)) + y[l+32] * d3 * ((int32_t)(q[l/2] & 0x0030) - ((hm & 0x0010) ? 0 : 64)) + y[l+48] * d4 * ((int32_t)(q[l/2] & 0x00c0) - ((hm & 0x0040) ? 0 : 256)); sum[1] += y[l+ 1] * d1 * ((int32_t)(q[l/2] & 0x0300) - ((hm & 0x0100) ? 0 : 1024)) + y[l+17] * d2 * ((int32_t)(q[l/2] & 0x0c00) - ((hm & 0x0400) ? 0 : 4096)) + y[l+33] * d3 * ((int32_t)(q[l/2] & 0x3000) - ((hm & 0x1000) ? 0 : 16384)) + y[l+49] * d4 * ((int32_t)(q[l/2] & 0xc000) - ((hm & 0x4000) ? 
0 : 65536)); } } const float sumf = sum[0] + sum[1] * 1.f/256.f; const float tot = simd_sum(sumf); if (tiisg == 0) { dst[r1*ne0 + im*ne0*ne1 + row] = tot; } } #endif [[host_name("kernel_mul_mv_q3_K_f32")]] kernel void kernel_mul_mv_q3_K_f32( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_q3_K_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } #if QK_K == 256 void kernel_mul_mv_q4_K_f32_impl( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne10, constant int64_t & ne12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { const uint16_t kmask1 = 0x3f3f; const uint16_t kmask2 = 0x0f0f; const uint16_t kmask3 = 0xc0c0; const int ix = tiisg/8; // 0...3 const int it = tiisg%8; // 0...7 const int iq = it/4; // 0 or 1 const int ir = it%4; // 0...3 const int nb = ne00/QK_K; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; //const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST; const int first_row = r0 * N_DST; const int ib_row = first_row * nb; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); device const block_q4_K * x = (device const block_q4_K *) src0 + ib_row + offset0; device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; float yl[16]; float yh[16]; float sumf[N_DST]={0.f}, all_sum; const int step = sizeof(block_q4_K) * nb / 2; device const float * y4 = y + ix * QK_K + 64 * iq + 8 * ir; uint16_t sc16[4]; thread const uint8_t * sc8 = (thread const uint8_t *)sc16; for (int ib = ix; ib < nb; ib += 4) { float4 sumy = {0.f, 0.f, 0.f, 0.f}; for (int i = 0; i < 8; ++i) { yl[i+0] = y4[i+ 0]; sumy[0] += yl[i+0]; yl[i+8] = y4[i+ 32]; sumy[1] += yl[i+8]; yh[i+0] = y4[i+128]; sumy[2] += yh[i+0]; yh[i+8] = y4[i+160]; sumy[3] += yh[i+8]; } device const uint16_t * sc = (device const uint16_t *)x[ib].scales + iq; device const uint16_t * q1 = (device const uint16_t *)x[ib].qs + 16 * iq + 4 * ir; device const half * dh = &x[ib].d; for (int row = 0; row < N_DST; row++) { sc16[0] = sc[0] & kmask1; sc16[1] = sc[2] & kmask1; sc16[2] = ((sc[4] >> 0) & kmask2) | ((sc[0] & kmask3) >> 2); sc16[3] = ((sc[4] >> 4) & kmask2) | ((sc[2] & kmask3) >> 2); device const uint16_t * q2 = q1 + 32; float4 acc1 = {0.f, 0.f, 0.f, 0.f}; float4 acc2 = {0.f, 0.f, 0.f, 0.f}; for (int i = 0; i < 8; i += 2) { acc1[0] += yl[i+0] * (q1[i/2] & 0x000F); acc1[1] += yl[i+1] * (q1[i/2] & 0x0F00); acc1[2] += yl[i+8] * (q1[i/2] & 0x00F0); acc1[3] += yl[i+9] * (q1[i/2] & 0xF000); acc2[0] += yh[i+0] * (q2[i/2] & 0x000F); acc2[1] += yh[i+1] * (q2[i/2] & 0x0F00); acc2[2] += yh[i+8] * (q2[i/2] & 0x00F0); acc2[3] += yh[i+9] * (q2[i/2] & 0xF000); } 
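// Note: dh[0]/dh[1] read the adjacent half fields d (super-block scale) and dmin of block_q4_K.
// acc1/acc2 still carry the bit position of each masked nibble, so the 1.f/256.f and 1.f/16.f factors
// below undo the byte/nibble shifts; dmin * sumy subtracts the per-sub-block minimums held in
// sc8[2], sc8[3], sc8[6] and sc8[7] (q4_K decodes as d*sc*q - dmin*m).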
float dall = dh[0]; float dmin = dh[1]; sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc1[1]) * sc8[0] + (acc1[2] + 1.f/256.f * acc1[3]) * sc8[1] * 1.f/16.f + (acc2[0] + 1.f/256.f * acc2[1]) * sc8[4] + (acc2[2] + 1.f/256.f * acc2[3]) * sc8[5] * 1.f/16.f) - dmin * (sumy[0] * sc8[2] + sumy[1] * sc8[3] + sumy[2] * sc8[6] + sumy[3] * sc8[7]); q1 += step; sc += step; dh += step; } y4 += 4 * QK_K; } for (int row = 0; row < N_DST; ++row) { all_sum = simd_sum(sumf[row]); if (tiisg == 0) { dst[r1*ne0 + im*ne0*ne1 + first_row + row] = all_sum; } } } #else void kernel_mul_mv_q4_K_f32_impl( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne10, constant int64_t & ne12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { const int ix = tiisg/4; // 0...7 const int it = tiisg%4; // 0...3 const int nb = ne00/QK_K; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = r0 * N_DST; const int ib_row = first_row * nb; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); device const block_q4_K * x = (device const block_q4_K *) src0 + ib_row + offset0; device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; float yl[8]; float yh[8]; float sumf[N_DST]={0.f}, all_sum; const int step = sizeof(block_q4_K) * nb / 2; device const float * y4 = y + ix * QK_K + 8 * it; uint16_t sc16[4]; for (int ib = ix; ib < nb; ib += 8) { float2 sumy = {0.f, 0.f}; for (int i = 0; i < 8; ++i) { yl[i] = y4[i+ 0]; sumy[0] += yl[i]; yh[i] = y4[i+32]; sumy[1] += yh[i]; } device const uint16_t * sc = (device const uint16_t *)x[ib].scales; device const uint16_t * qs = (device const uint16_t *)x[ib].qs + 4 * it; device const half * dh = x[ib].d; for (int row = 0; row < N_DST; row++) { sc16[0] = sc[0] & 0x000f; sc16[1] = sc[0] & 0x0f00; sc16[2] = sc[0] & 0x00f0; sc16[3] = sc[0] & 0xf000; float2 acc1 = {0.f, 0.f}; float2 acc2 = {0.f, 0.f}; for (int i = 0; i < 8; i += 2) { acc1[0] += yl[i+0] * (qs[i/2] & 0x000F); acc1[1] += yl[i+1] * (qs[i/2] & 0x0F00); acc2[0] += yh[i+0] * (qs[i/2] & 0x00F0); acc2[1] += yh[i+1] * (qs[i/2] & 0xF000); } float dall = dh[0]; float dmin = dh[1]; sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc1[1]) * sc16[0] + (acc2[0] + 1.f/256.f * acc2[1]) * sc16[1] * 1.f/4096.f) - dmin * 1.f/16.f * (sumy[0] * sc16[2] + sumy[1] * sc16[3] * 1.f/256.f); qs += step; sc += step; dh += step; } y4 += 8 * QK_K; } for (int row = 0; row < N_DST; ++row) { all_sum = simd_sum(sumf[row]); if (tiisg == 0) { dst[r1*ne0 + im*ne0*ne1 + first_row + row] = all_sum; } } } #endif [[host_name("kernel_mul_mv_q4_K_f32")]] kernel void kernel_mul_mv_q4_K_f32( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { 
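// Thin [[host_name]] wrapper: it forwards only the arguments the impl actually uses; the nb0x strides
// and ne11 in the signature are unused here, presumably kept so all mul_mv kernels share the same
// host-side argument layout.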
kernel_mul_mv_q4_K_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } void kernel_mul_mv_q5_K_f32_impl( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne10, constant int64_t & ne12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { const int nb = ne00/QK_K; const int64_t r0 = tgpig.x; const int64_t r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * N_SIMDGROUP + sgitg) * 2; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); device const block_q5_K * x = (device const block_q5_K *) src0 + first_row*nb + offset0; device const float * yy = (device const float *) src1 + r1*ne10 + im*ne00*ne1; float sumf[2]={0.f}; const int step = sizeof(block_q5_K) * nb; #if QK_K == 256 # float yl[16], yh[16]; const uint16_t kmask1 = 0x3f3f; const uint16_t kmask2 = 0x0f0f; const uint16_t kmask3 = 0xc0c0; const int tid = tiisg/4; const int ix = tiisg%4; const int iq = tid/4; const int ir = tid%4; const int n = 8; const int l0 = n*ir; const int q_offset = 32*iq + l0; const int y_offset = 64*iq + l0; const uint8_t hm1 = 1u << (2*iq); const uint8_t hm2 = hm1 << 1; const uint8_t hm3 = hm1 << 4; const uint8_t hm4 = hm2 << 4; uint16_t sc16[4]; thread const uint8_t * sc8 = (thread const uint8_t *)sc16; device const float * y1 = yy + ix*QK_K + y_offset; for (int i = ix; i < nb; i += 4) { device const uint8_t * q1 = x[i].qs + q_offset; device const uint8_t * qh = x[i].qh + l0; device const half * dh = &x[i].d; device const uint16_t * a = (device const uint16_t *)x[i].scales + iq; device const float * y2 = y1 + 128; float4 sumy = {0.f, 0.f, 0.f, 0.f}; for (int l = 0; l < 8; ++l) { yl[l+0] = y1[l+ 0]; sumy[0] += yl[l+0]; yl[l+8] = y1[l+32]; sumy[1] += yl[l+8]; yh[l+0] = y2[l+ 0]; sumy[2] += yh[l+0]; yh[l+8] = y2[l+32]; sumy[3] += yh[l+8]; } for (int row = 0; row < 2; ++row) { device const uint8_t * q2 = q1 + 64; sc16[0] = a[0] & kmask1; sc16[1] = a[2] & kmask1; sc16[2] = ((a[4] >> 0) & kmask2) | ((a[0] & kmask3) >> 2); sc16[3] = ((a[4] >> 4) & kmask2) | ((a[2] & kmask3) >> 2); float4 acc1 = {0.f}; float4 acc2 = {0.f}; for (int l = 0; l < n; ++l) { uint8_t h = qh[l]; acc1[0] += yl[l+0] * (q1[l] & 0x0F); acc1[1] += yl[l+8] * (q1[l] & 0xF0); acc1[2] += yh[l+0] * (q2[l] & 0x0F); acc1[3] += yh[l+8] * (q2[l] & 0xF0); acc2[0] += h & hm1 ? yl[l+0] : 0.f; acc2[1] += h & hm2 ? yl[l+8] : 0.f; acc2[2] += h & hm3 ? yh[l+0] : 0.f; acc2[3] += h & hm4 ? 
yh[l+8] : 0.f; } const float dall = dh[0]; const float dmin = dh[1]; sumf[row] += dall * (sc8[0] * (acc1[0] + 16.f*acc2[0]) + sc8[1] * (acc1[1]/16.f + 16.f*acc2[1]) + sc8[4] * (acc1[2] + 16.f*acc2[2]) + sc8[5] * (acc1[3]/16.f + 16.f*acc2[3])) - dmin * (sumy[0] * sc8[2] + sumy[1] * sc8[3] + sumy[2] * sc8[6] + sumy[3] * sc8[7]); q1 += step; qh += step; dh += step/2; a += step/2; } y1 += 4 * QK_K; } #else float yl[8], yh[8]; const int il = 4 * (tiisg/8); // 0, 4, 8, 12 const int ix = tiisg%8; const int iq = il/8; // 0, 0, 1, 1 const int in = il%8; // 0, 4, 0, 4 device const float * y = yy + ix*QK_K + il; for (int i = ix; i < nb; i += 8) { for (int l = 0; l < 4; ++l) { yl[l+0] = y[l+ 0]; yl[l+4] = y[l+16]; yh[l+0] = y[l+32]; yh[l+4] = y[l+48]; } device const half * dh = &x[i].d; device const uint8_t * q = x[i].qs + il; device const uint8_t * h = x[i].qh + in; device const int8_t * s = x[i].scales; for (int row = 0; row < 2; ++row) { const float d = dh[0]; float2 acc = {0.f, 0.f}; for (int l = 0; l < 4; ++l) { const uint8_t hl = h[l] >> iq; acc[0] += yl[l+0] * s[0] * ((int16_t)(q[l+ 0] & 0x0F) - (hl & 0x01 ? 0 : 16)) + yl[l+4] * s[1] * ((int16_t)(q[l+16] & 0x0F) - (hl & 0x04 ? 0 : 16)); acc[1] += yh[l+0] * s[2] * ((int16_t)(q[l+ 0] & 0xF0) - (hl & 0x10 ? 0 : 256)) + yh[l+4] * s[3] * ((int16_t)(q[l+16] & 0xF0) - (hl & 0x40 ? 0 : 256)); } sumf[row] += d * (acc[0] + 1.f/16.f * acc[1]); q += step; h += step; s += step; dh += step/2; } y += 8 * QK_K; } #endif for (int row = 0; row < 2; ++row) { const float tot = simd_sum(sumf[row]); if (tiisg == 0) { dst[r1*ne0 + im*ne0*ne1 + first_row + row] = tot; } } } [[host_name("kernel_mul_mv_q5_K_f32")]] kernel void kernel_mul_mv_q5_K_f32( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_q5_K_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } void kernel_mul_mv_q6_K_f32_impl( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne10, constant int64_t & ne12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { const uint8_t kmask1 = 0x03; const uint8_t kmask2 = 0x0C; const uint8_t kmask3 = 0x30; const uint8_t kmask4 = 0xC0; const int nb = ne00/QK_K; const int64_t r0 = tgpig.x; const int64_t r1 = tgpig.y; const int im = tgpig.z; const int row = 2 * r0 + sgitg; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); device const block_q6_K * x = (device const block_q6_K *) src0 + row * nb + offset0; device const float * yy = (device const float *) src1 + r1*ne10 + im*ne00*ne1; float sumf = 0; #if QK_K == 256 const int tid = tiisg/2; const int ix = tiisg%2; const int ip = tid/8; // 0 or 1 const int il = tid%8; const int n 
= 4; const int l0 = n*il; const int is = 8*ip + l0/16; const int y_offset = 128*ip + l0; const int q_offset_l = 64*ip + l0; const int q_offset_h = 32*ip + l0; for (int i = ix; i < nb; i += 2) { device const uint8_t * q1 = x[i].ql + q_offset_l; device const uint8_t * q2 = q1 + 32; device const uint8_t * qh = x[i].qh + q_offset_h; device const int8_t * sc = x[i].scales + is; device const float * y = yy + i * QK_K + y_offset; const float dall = x[i].d; float4 sums = {0.f, 0.f, 0.f, 0.f}; for (int l = 0; l < n; ++l) { sums[0] += y[l+ 0] * ((int8_t)((q1[l] & 0xF) | ((qh[l] & kmask1) << 4)) - 32); sums[1] += y[l+32] * ((int8_t)((q2[l] & 0xF) | ((qh[l] & kmask2) << 2)) - 32); sums[2] += y[l+64] * ((int8_t)((q1[l] >> 4) | ((qh[l] & kmask3) << 0)) - 32); sums[3] += y[l+96] * ((int8_t)((q2[l] >> 4) | ((qh[l] & kmask4) >> 2)) - 32); } sumf += dall * (sums[0] * sc[0] + sums[1] * sc[2] + sums[2] * sc[4] + sums[3] * sc[6]); } #else const int ix = tiisg/4; const int il = 4*(tiisg%4); for (int i = ix; i < nb; i += 8) { device const float * y = yy + i * QK_K + il; device const uint8_t * ql = x[i].ql + il; device const uint8_t * qh = x[i].qh + il; device const int8_t * s = x[i].scales; const float d = x[i].d; float4 sums = {0.f, 0.f, 0.f, 0.f}; for (int l = 0; l < 4; ++l) { sums[0] += y[l+ 0] * ((int8_t)((ql[l+ 0] & 0xF) | ((qh[l] & kmask1) << 4)) - 32); sums[1] += y[l+16] * ((int8_t)((ql[l+16] & 0xF) | ((qh[l] & kmask2) << 2)) - 32); sums[2] += y[l+32] * ((int8_t)((ql[l+ 0] >> 4) | ((qh[l] & kmask3) >> 0)) - 32); sums[3] += y[l+48] * ((int8_t)((ql[l+16] >> 4) | ((qh[l] & kmask4) >> 2)) - 32); } sumf += d * (sums[0] * s[0] + sums[1] * s[1] + sums[2] * s[2] + sums[3] * s[3]); } #endif const float tot = simd_sum(sumf); if (tiisg == 0) { dst[r1*ne0 + im*ne0*ne1 + row] = tot; } } [[host_name("kernel_mul_mv_q6_K_f32")]] kernel void kernel_mul_mv_q6_K_f32( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_q6_K_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } //============================= templates and their specializations ============================= // NOTE: this is not dequantizing - we are simply fitting the template template <typename type4x4> void dequantize_f32(device const float4x4 * src, short il, thread type4x4 & reg) { float4x4 temp = *(((device float4x4 *)src)); for (int i = 0; i < 16; i++){ reg[i/4][i%4] = temp[i/4][i%4]; } } template <typename type4x4> void dequantize_f16(device const half4x4 * src, short il, thread type4x4 & reg) { half4x4 temp = *(((device half4x4 *)src)); for (int i = 0; i < 16; i++){ reg[i/4][i%4] = temp[i/4][i%4]; } } template <typename type4x4> void dequantize_q4_0(device const block_q4_0 *xb, short il, thread type4x4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 1); const float d1 = il ? (xb->d / 16.h) : xb->d; const float d2 = d1 / 256.f; const float md = -8.h * xb->d; const ushort mask0 = il ? 
0x00F0 : 0x000F; const ushort mask1 = mask0 << 8; for (int i=0;i<8;i++) { reg[i/2][2*(i%2)+0] = d1 * (qs[i] & mask0) + md; reg[i/2][2*(i%2)+1] = d2 * (qs[i] & mask1) + md; } } template <typename type4x4> void dequantize_q4_1(device const block_q4_1 *xb, short il, thread type4x4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 2); const float d1 = il ? (xb->d / 16.h) : xb->d; const float d2 = d1 / 256.f; const float m = xb->m; const ushort mask0 = il ? 0x00F0 : 0x000F; const ushort mask1 = mask0 << 8; for (int i=0;i<8;i++) { reg[i/2][2*(i%2)+0] = ((qs[i] & mask0) * d1) + m; reg[i/2][2*(i%2)+1] = ((qs[i] & mask1) * d2) + m; } } template <typename type4x4> void dequantize_q5_0(device const block_q5_0 *xb, short il, thread type4x4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 3); const float d = xb->d; const float md = -16.h * xb->d; const ushort mask = il ? 0x00F0 : 0x000F; const uint32_t qh = *((device const uint32_t *)xb->qh); const int x_mv = il ? 4 : 0; const int gh_mv = il ? 12 : 0; const int gh_bk = il ? 0 : 4; for (int i = 0; i < 8; i++) { // extract the 5-th bits for x0 and x1 const uint8_t xh_0 = ((qh >> (gh_mv + 2*i )) << gh_bk) & 0x10; const uint8_t xh_1 = ((qh >> (gh_mv + 2*i+1)) << gh_bk) & 0x10; // combine the 4-bits from qs with the 5th bit const int32_t x0 = ((((qs[i] ) & mask) >> x_mv) | xh_0); const int32_t x1 = ((((qs[i] >> 8) & mask) >> x_mv) | xh_1); reg[i/2][2*(i%2)+0] = d * x0 + md; reg[i/2][2*(i%2)+1] = d * x1 + md; } } template <typename type4x4> void dequantize_q5_1(device const block_q5_1 *xb, short il, thread type4x4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 4); const float d = xb->d; const float m = xb->m; const ushort mask = il ? 0x00F0 : 0x000F; const uint32_t qh = *((device const uint32_t *)xb->qh); const int x_mv = il ? 4 : 0; const int gh_mv = il ? 12 : 0; const int gh_bk = il ? 0 : 4; for (int i = 0; i < 8; i++) { // extract the 5-th bits for x0 and x1 const uint8_t xh_0 = ((qh >> (gh_mv + 2*i )) << gh_bk) & 0x10; const uint8_t xh_1 = ((qh >> (gh_mv + 2*i+1)) << gh_bk) & 0x10; // combine the 4-bits from qs with the 5th bit const int32_t x0 = ((((qs[i] ) & mask) >> x_mv) | xh_0); const int32_t x1 = ((((qs[i] >> 8) & mask) >> x_mv) | xh_1); reg[i/2][2*(i%2)+0] = d * x0 + m; reg[i/2][2*(i%2)+1] = d * x1 + m; } } template <typename type4x4> void dequantize_q8_0(device const block_q8_0 *xb, short il, thread type4x4 & reg) { device const int8_t * qs = ((device const int8_t *)xb->qs); const half d = xb->d; for (int i = 0; i < 16; i++) { reg[i/4][i%4] = (qs[i + 16*il] * d); } } template <typename type4x4> void dequantize_q2_K(device const block_q2_K *xb, short il, thread type4x4 & reg) { const float d = xb->d; const float min = xb->dmin; device const uint8_t * q = (device const uint8_t *)xb->qs; float dl, ml; uint8_t sc = xb->scales[il]; #if QK_K == 256 q = q + 32*(il/8) + 16*(il&1); il = (il/2)%4; #endif half coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h); uchar mask = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 
12 : 3); dl = d * (sc & 0xF) * coef, ml = min * (sc >> 4); for (int i = 0; i < 16; ++i) { reg[i/4][i%4] = dl * (q[i] & mask) - ml; } } template <typename type4x4> void dequantize_q3_K(device const block_q3_K *xb, short il, thread type4x4 & reg) { const half d_all = xb->d; device const uint8_t * q = (device const uint8_t *)xb->qs; device const uint8_t * h = (device const uint8_t *)xb->hmask; device const int8_t * scales = (device const int8_t *)xb->scales; #if QK_K == 256 q = q + 32 * (il/8) + 16 * (il&1); h = h + 16 * (il&1); uint8_t m = 1 << (il/2); uint16_t kmask1 = (il/4)>1 ? ((il/4)>2 ? 192 : 48) : \ ((il/4)>0 ? 12 : 3); uint16_t kmask2 = il/8 ? 0xF0 : 0x0F; uint16_t scale_2 = scales[il%8], scale_1 = scales[8 + il%4]; int16_t dl_int = (il/4)&1 ? (scale_2&kmask2) | ((scale_1&kmask1) << 2) : (scale_2&kmask2) | ((scale_1&kmask1) << 4); half dl = il<8 ? d_all * (dl_int - 32.h) : d_all * (dl_int / 16.h - 32.h); const half ml = 4.h * dl; il = (il/2) & 3; const half coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h); const uint8_t mask = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3); dl *= coef; for (int i = 0; i < 16; ++i) { reg[i/4][i%4] = dl * (q[i] & mask) - (h[i] & m ? 0 : ml); } #else float kcoef = il&1 ? 1.f/16.f : 1.f; uint16_t kmask = il&1 ? 0xF0 : 0x0F; float dl = d_all * ((scales[il/2] & kmask) * kcoef - 8); float coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h); uint8_t mask = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3); uint8_t m = 1<<(il*2); for (int i = 0; i < 16; ++i) { reg[i/4][i%4] = coef * dl * ((q[i] & mask) - ((h[i%8] & (m * (1 + i/8))) ? 0 : 4.f/coef)); } #endif } static inline uchar2 get_scale_min_k4_just2(int j, int k, device const uchar * q) { return j < 4 ? uchar2{uchar(q[j+0+k] & 63), uchar(q[j+4+k] & 63)} : uchar2{uchar((q[j+4+k] & 0xF) | ((q[j-4+k] & 0xc0) >> 2)), uchar((q[j+4+k] >> 4) | ((q[j-0+k] & 0xc0) >> 2))}; } template <typename type4x4> void dequantize_q4_K(device const block_q4_K *xb, short il, thread type4x4 & reg) { device const uchar * q = xb->qs; #if QK_K == 256 short is = (il/4) * 2; q = q + (il/4) * 32 + 16 * (il&1); il = il & 3; const uchar2 sc = get_scale_min_k4_just2(is, il/2, xb->scales); const float d = il < 2 ? xb->d : xb->d / 16.h; const float min = xb->dmin; const float dl = d * sc[0]; const float ml = min * sc[1]; #else q = q + 16 * (il&1); device const uint8_t * s = xb->scales; device const half2 * dh = (device const half2 *)xb->d; const float2 d = (float2)dh[0]; const float dl = il<2 ? d[0] * (s[0]&0xF) : d[0] * (s[1]&0xF)/16.h; const float ml = il<2 ? d[1] * (s[0]>>4) : d[1] * (s[1]>>4); #endif const ushort mask = il<2 ? 0x0F : 0xF0; for (int i = 0; i < 16; ++i) { reg[i/4][i%4] = dl * (q[i] & mask) - ml; } } template <typename type4x4> void dequantize_q5_K(device const block_q5_K *xb, short il, thread type4x4 & reg) { device const uint8_t * q = xb->qs; device const uint8_t * qh = xb->qh; #if QK_K == 256 short is = (il/4) * 2; q = q + 32 * (il/4) + 16 * (il&1); qh = qh + 16 * (il&1); uint8_t ul = 1 << (il/2); il = il & 3; const uchar2 sc = get_scale_min_k4_just2(is, il/2, xb->scales); const float d = il < 2 ? xb->d : xb->d / 16.h; const float min = xb->dmin; const float dl = d * sc[0]; const float ml = min * sc[1]; const ushort mask = il<2 ? 0x0F : 0xF0; const float qh_val = il<2 ? 16.f : 256.f; for (int i = 0; i < 16; ++i) { reg[i/4][i%4] = dl * ((q[i] & mask) + (qh[i] & ul ? 
qh_val : 0)) - ml; } #else q = q + 16 * (il&1); device const int8_t * s = xb->scales; const float dl = xb->d * s[il]; uint8_t m = 1<<(il*2); const float coef = il<2 ? 1.f : 1.f/16.f; const ushort mask = il<2 ? 0x0F : 0xF0; for (int i = 0; i < 16; ++i) { reg[i/4][i%4] = coef * dl * ((q[i] & mask) - (qh[i%8] & (m*(1+i/8)) ? 0.f : 16.f/coef)); } #endif } template <typename type4x4> void dequantize_q6_K(device const block_q6_K *xb, short il, thread type4x4 & reg) { const half d_all = xb->d; device const uint8_t * ql = (device const uint8_t *)xb->ql; device const uint8_t * qh = (device const uint8_t *)xb->qh; device const int8_t * scales = (device const int8_t *)xb->scales; #if QK_K == 256 ql = ql + 64*(il/8) + 32*((il/2)&1) + 16*(il&1); qh = qh + 32*(il/8) + 16*(il&1); half sc = scales[(il%2) + 2 * ((il/2))]; il = (il/2) & 3; #else ql = ql + 16 * (il&1); half sc = scales[il]; #endif const uint16_t kmask1 = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3); const uint16_t kmask2 = il>1 ? 0xF0 : 0x0F; const half coef = il>1 ? 1.f/16.h : 1.h; const half ml = d_all * sc * 32.h; const half dl = d_all * sc * coef; for (int i = 0; i < 16; ++i) { const half q = il&1 ? ((ql[i] & kmask2) | ((qh[i] & kmask1) << 2)) : ((ql[i] & kmask2) | ((qh[i] & kmask1) << 4)); reg[i/4][i%4] = dl * q - ml; } } template<typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread float4x4 &)> kernel void kernel_get_rows( device const void * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb1, constant uint64_t & nb2, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint3 tptg [[threads_per_threadgroup]]) { //const int64_t i = tgpig; //const int64_t r = ((device int32_t *) src1)[i]; const int64_t i10 = tgpig.x; const int64_t i11 = tgpig.y; const int64_t r = ((device int32_t *) ((device char *) src1 + i11*nb11 + i10*nb10))[0]; const int64_t i02 = i11; for (int64_t ind = tiitg; ind < ne00/16; ind += tptg.x) { float4x4 temp; dequantize_func( ((device const block_q *) ((device char *) src0 + r*nb01 + i02*nb02)) + ind/nl, ind%nl, temp); *(((device float4x4 *) ((device char *) dst + i11*nb2 + i10*nb1)) + ind) = temp; } } kernel void kernel_get_rows_f32( device const void * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb1, constant uint64_t & nb2, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint3 tptg [[threads_per_threadgroup]]) { const int64_t i10 = tgpig.x; const int64_t i11 = tgpig.y; const int64_t r = ((device int32_t *) ((device char *) src1 + i11*nb11 + i10*nb10))[0]; const int64_t i02 = i11; for (int ind = tiitg; ind < ne00; ind += tptg.x) { ((device float *) ((device char *) dst + i11*nb2 + i10*nb1))[ind] = ((device float *) ((device char *) src0 + r*nb01 + i02*nb02))[ind]; } } kernel void kernel_get_rows_f16( device const void * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb1, constant uint64_t & nb2, uint3 tgpig[[threadgroup_position_in_grid]], uint 
tiitg[[thread_index_in_threadgroup]], uint3 tptg [[threads_per_threadgroup]]) { const int64_t i10 = tgpig.x; const int64_t i11 = tgpig.y; const int64_t r = ((device int32_t *) ((device char *) src1 + i11*nb11 + i10*nb10))[0]; const int64_t i02 = i11; for (int ind = tiitg; ind < ne00; ind += tptg.x) { ((device float *) ((device char *) dst + i11*nb2 + i10*nb1))[ind] = ((device half *) ((device char *) src0 + r*nb01 + i02*nb02))[ind]; } } #define BLOCK_SIZE_M 64 // 8 simdgroup matrices from matrix A #define BLOCK_SIZE_N 32 // 4 simdgroup matrices from matrix B #define BLOCK_SIZE_K 32 #define THREAD_MAT_M 4 // each thread take 4 simdgroup matrices from matrix A #define THREAD_MAT_N 2 // each thread take 2 simdgroup matrices from matrix B #define THREAD_PER_BLOCK 128 #define THREAD_PER_ROW 2 // 2 thread for each row in matrix A to load numbers #define THREAD_PER_COL 4 // 4 thread for each row in matrix B to load numbers #define SG_MAT_SIZE 64 // simdgroup matrix is of shape 8x8 #define SG_MAT_ROW 8 // each block_q contains 16*nl weights template<typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread half4x4 &)> void kernel_mul_mm_impl(device const uchar * src0, device const uchar * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne02, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, threadgroup uchar * shared_memory [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { threadgroup half * sa = (threadgroup half *)(shared_memory); threadgroup float * sb = (threadgroup float *)(shared_memory + 4096); const uint r0 = tgpig.y; const uint r1 = tgpig.x; const uint im = tgpig.z; // if this block is of 64x32 shape or smaller short n_rows = (ne0 - r0 * BLOCK_SIZE_M < BLOCK_SIZE_M) ? (ne0 - r0 * BLOCK_SIZE_M) : BLOCK_SIZE_M; short n_cols = (ne1 - r1 * BLOCK_SIZE_N < BLOCK_SIZE_N) ? (ne1 - r1 * BLOCK_SIZE_N) : BLOCK_SIZE_N; // a thread shouldn't load data outside of the matrix short thread_row = ((short)tiitg/THREAD_PER_ROW) < n_rows ? ((short)tiitg/THREAD_PER_ROW) : n_rows - 1; short thread_col = ((short)tiitg/THREAD_PER_COL) < n_cols ? 
((short)tiitg/THREAD_PER_COL) : n_cols - 1; simdgroup_half8x8 ma[4]; simdgroup_float8x8 mb[2]; simdgroup_float8x8 c_res[8]; for (int i = 0; i < 8; i++){ c_res[i] = make_filled_simdgroup_matrix<float, 8>(0.f); } short il = (tiitg % THREAD_PER_ROW); const uint i12 = im%ne12; const uint i13 = im/ne12; uint offset0 = (i12/r2)*nb02 + (i13/r3)*(nb02*ne02); ushort offset1 = il/nl; device const block_q * x = (device const block_q *)(src0 + (r0 * BLOCK_SIZE_M + thread_row) * nb01 + offset0) + offset1; device const float * y = (device const float *)(src1 + nb12 * im + nb11 * (r1 * BLOCK_SIZE_N + thread_col) + nb10 * (BLOCK_SIZE_K / THREAD_PER_COL * (tiitg % THREAD_PER_COL))); for (int loop_k = 0; loop_k < ne00; loop_k += BLOCK_SIZE_K) { // load data and store to threadgroup memory half4x4 temp_a; dequantize_func(x, il, temp_a); threadgroup_barrier(mem_flags::mem_threadgroup); #pragma unroll(16) for (int i = 0; i < 16; i++) { *(sa + SG_MAT_SIZE * ((tiitg / THREAD_PER_ROW / 8) \ + (tiitg % THREAD_PER_ROW) * 16 + (i / 8) * 8) \ + (tiitg / THREAD_PER_ROW) % 8 + (i & 7) * 8) = temp_a[i/4][i%4]; } *(threadgroup float2x4 *)(sb + (tiitg % THREAD_PER_COL) * 8 * 32 + 8 * (tiitg / THREAD_PER_COL)) = *((device float2x4 *)y); il = (il + 2 < nl) ? il + 2 : il % 2; x = (il < 2) ? x + (2+nl-1)/nl : x; y += BLOCK_SIZE_K; threadgroup_barrier(mem_flags::mem_threadgroup); // load matrices from threadgroup memory and conduct outer products threadgroup half * lsma = (sa + THREAD_MAT_M * SG_MAT_SIZE * (sgitg % 2)); threadgroup float * lsmb = (sb + THREAD_MAT_N * SG_MAT_SIZE * (sgitg / 2)); #pragma unroll(4) for (int ik = 0; ik < BLOCK_SIZE_K / 8; ik++) { #pragma unroll(4) for (int i = 0; i < 4; i++) { simdgroup_load(ma[i],lsma + SG_MAT_SIZE * i); } simdgroup_barrier(mem_flags::mem_none); #pragma unroll(2) for (int i = 0; i < 2; i++) { simdgroup_load(mb[i],lsmb + SG_MAT_SIZE * i); } lsma += BLOCK_SIZE_M / SG_MAT_ROW * SG_MAT_SIZE; lsmb += BLOCK_SIZE_N / SG_MAT_ROW * SG_MAT_SIZE; #pragma unroll(8) for (int i = 0; i < 8; i++){ simdgroup_multiply_accumulate(c_res[i], mb[i/4], ma[i%4], c_res[i]); } } } if ((r0 + 1) * BLOCK_SIZE_M <= ne0 && (r1 + 1) * BLOCK_SIZE_N <= ne1) { device float * C = dst + (BLOCK_SIZE_M * r0 + 32 * (sgitg & 1)) \ + (BLOCK_SIZE_N * r1 + 16 * (sgitg >> 1)) * ne0 + im*ne1*ne0; for (int i = 0; i < 8; i++) { simdgroup_store(c_res[i], C + 8 * (i%4) + 8 * ne0 * (i/4), ne0); } } else { // block is smaller than 64x32, we should avoid writing data outside of the matrix threadgroup_barrier(mem_flags::mem_threadgroup); threadgroup float * temp_str = ((threadgroup float *)shared_memory) \ + 32 * (sgitg&1) + (16 * (sgitg>>1)) * BLOCK_SIZE_M; for (int i = 0; i < 8; i++) { simdgroup_store(c_res[i], temp_str + 8 * (i%4) + 8 * BLOCK_SIZE_M * (i/4), BLOCK_SIZE_M); } threadgroup_barrier(mem_flags::mem_threadgroup); device float * C = dst + (BLOCK_SIZE_M * r0) + (BLOCK_SIZE_N * r1) * ne0 + im*ne1*ne0; if (sgitg == 0) { for (int i = 0; i < n_rows; i++) { for (int j = tiitg; j < n_cols; j += BLOCK_SIZE_N) { *(C + i + j * ne0) = *(temp_str + i + j * BLOCK_SIZE_M); } } } } } // same as kernel_mul_mm_impl, but src1 and dst are accessed via indices stored in src1ids template<typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread half4x4 &)> void kernel_mul_mm_id_impl( device const uchar * src0, device const uchar * src1, thread short * src1ids, device float * dst, constant int64_t & ne00, constant int64_t & ne02, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne12, 
constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, int64_t ne1, constant uint & r2, constant uint & r3, threadgroup uchar * shared_memory, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { threadgroup half * sa = (threadgroup half *)(shared_memory); threadgroup float * sb = (threadgroup float *)(shared_memory + 4096); const uint r0 = tgpig.y; const uint r1 = tgpig.x; const uint im = tgpig.z; if (r1 * BLOCK_SIZE_N >= ne1) return; // if this block is of 64x32 shape or smaller short n_rows = (ne0 - r0 * BLOCK_SIZE_M < BLOCK_SIZE_M) ? (ne0 - r0 * BLOCK_SIZE_M) : BLOCK_SIZE_M; short n_cols = (ne1 - r1 * BLOCK_SIZE_N < BLOCK_SIZE_N) ? (ne1 - r1 * BLOCK_SIZE_N) : BLOCK_SIZE_N; // a thread shouldn't load data outside of the matrix short thread_row = ((short)tiitg/THREAD_PER_ROW) < n_rows ? ((short)tiitg/THREAD_PER_ROW) : n_rows - 1; short thread_col = ((short)tiitg/THREAD_PER_COL) < n_cols ? ((short)tiitg/THREAD_PER_COL) : n_cols - 1; simdgroup_half8x8 ma[4]; simdgroup_float8x8 mb[2]; simdgroup_float8x8 c_res[8]; for (int i = 0; i < 8; i++){ c_res[i] = make_filled_simdgroup_matrix<float, 8>(0.f); } short il = (tiitg % THREAD_PER_ROW); const uint i12 = im%ne12; const uint i13 = im/ne12; uint offset0 = (i12/r2)*nb02 + (i13/r3)*(nb02*ne02); ushort offset1 = il/nl; device const block_q * x = (device const block_q *)(src0 + (r0 * BLOCK_SIZE_M + thread_row) * nb01 + offset0) + offset1; device const float * y = (device const float *)(src1 + nb12 * im + nb11 * src1ids[r1 * BLOCK_SIZE_N + thread_col] + nb10 * (BLOCK_SIZE_K / THREAD_PER_COL * (tiitg % THREAD_PER_COL))); for (int loop_k = 0; loop_k < ne00; loop_k += BLOCK_SIZE_K) { // load data and store to threadgroup memory half4x4 temp_a; dequantize_func(x, il, temp_a); threadgroup_barrier(mem_flags::mem_threadgroup); for (int i = 0; i < 16; i++) { *(sa + SG_MAT_SIZE * ((tiitg / THREAD_PER_ROW / 8) \ + (tiitg % THREAD_PER_ROW) * 16 + (i / 8) * 8) \ + (tiitg / THREAD_PER_ROW) % 8 + (i & 7) * 8) = temp_a[i/4][i%4]; } *(threadgroup float2x4 *)(sb + (tiitg % THREAD_PER_COL) * 8 * 32 + 8 * (tiitg / THREAD_PER_COL)) = *((device float2x4 *)y); il = (il + 2 < nl) ? il + 2 : il % 2; x = (il < 2) ? 
x + (2+nl-1)/nl : x; y += BLOCK_SIZE_K; threadgroup_barrier(mem_flags::mem_threadgroup); // load matrices from threadgroup memory and conduct outer products threadgroup half * lsma = (sa + THREAD_MAT_M * SG_MAT_SIZE * (sgitg % 2)); threadgroup float * lsmb = (sb + THREAD_MAT_N * SG_MAT_SIZE * (sgitg / 2)); for (int ik = 0; ik < BLOCK_SIZE_K / 8; ik++) { for (int i = 0; i < 4; i++) { simdgroup_load(ma[i],lsma + SG_MAT_SIZE * i); } simdgroup_barrier(mem_flags::mem_none); for (int i = 0; i < 2; i++) { simdgroup_load(mb[i],lsmb + SG_MAT_SIZE * i); } lsma += BLOCK_SIZE_M / SG_MAT_ROW * SG_MAT_SIZE; lsmb += BLOCK_SIZE_N / SG_MAT_ROW * SG_MAT_SIZE; for (int i = 0; i < 8; i++){ simdgroup_multiply_accumulate(c_res[i], mb[i/4], ma[i%4], c_res[i]); } } } { threadgroup_barrier(mem_flags::mem_threadgroup); threadgroup float * temp_str = ((threadgroup float *)shared_memory) \ + 32 * (sgitg&1) + (16 * (sgitg>>1)) * BLOCK_SIZE_M; for (int i = 0; i < 8; i++) { simdgroup_store(c_res[i], temp_str + 8 * (i%4) + 8 * BLOCK_SIZE_M * (i/4), BLOCK_SIZE_M); } threadgroup_barrier(mem_flags::mem_threadgroup); device float * C = dst + (BLOCK_SIZE_M * r0) + im*ne1*ne0; if (sgitg == 0) { for (int i = 0; i < n_rows; i++) { for (int j = tiitg; j < n_cols; j += BLOCK_SIZE_N) { *(C + i + src1ids[j + r1*BLOCK_SIZE_N] * ne0) = *(temp_str + i + j * BLOCK_SIZE_M); } } } } } template<typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread half4x4 &)> kernel void kernel_mul_mm(device const uchar * src0, device const uchar * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne02, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, threadgroup uchar * shared_memory [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mm_impl<block_q, nl, dequantize_func>( src0, src1, dst, ne00, ne02, nb01, nb02, ne12, nb10, nb11, nb12, ne0, ne1, r2, r3, shared_memory, tgpig, tiitg, sgitg); } template<typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread half4x4 &)> kernel void kernel_mul_mm_id( device const uchar * ids, device const uchar * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne02, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const uchar * src00, device const uchar * src01, device const uchar * src02, device const uchar * src03, device const uchar * src04, device const uchar * src05, device const uchar * src06, device const uchar * src07, threadgroup uchar * shared_memory [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const uchar * src0s[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; // expert id const int32_t id = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); // row indices of src1 for expert id int64_t _ne1 = 0; short src1ids[512]; for (int64_t i1 = 0; i1 < ne1; i1++) { if (((device int32_t *) 
(ids + i1*nbi1))[idx] == id) { src1ids[_ne1++] = i1; } } kernel_mul_mm_id_impl<block_q, nl, dequantize_func>( src0s[id], src1, src1ids, dst, ne00, ne02, nb01, nb02, ne12, nb10, nb11, nb12, ne0, _ne1, r2, r3, shared_memory, tgpig, tiitg, sgitg); } #if QK_K == 256 #define QK_NL 16 #else #define QK_NL 4 #endif // // get rows // typedef void (get_rows_t)( device const void * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb1, constant uint64_t & nb2, uint3, uint, uint3); //template [[host_name("kernel_get_rows_f32")]] kernel get_rows_t kernel_get_rows<float4x4, 1, dequantize_f32>; //template [[host_name("kernel_get_rows_f16")]] kernel get_rows_t kernel_get_rows<half4x4, 1, dequantize_f16>; template [[host_name("kernel_get_rows_q4_0")]] kernel get_rows_t kernel_get_rows<block_q4_0, 2, dequantize_q4_0>; template [[host_name("kernel_get_rows_q4_1")]] kernel get_rows_t kernel_get_rows<block_q4_1, 2, dequantize_q4_1>; template [[host_name("kernel_get_rows_q5_0")]] kernel get_rows_t kernel_get_rows<block_q5_0, 2, dequantize_q5_0>; template [[host_name("kernel_get_rows_q5_1")]] kernel get_rows_t kernel_get_rows<block_q5_1, 2, dequantize_q5_1>; template [[host_name("kernel_get_rows_q8_0")]] kernel get_rows_t kernel_get_rows<block_q8_0, 2, dequantize_q8_0>; template [[host_name("kernel_get_rows_q2_K")]] kernel get_rows_t kernel_get_rows<block_q2_K, QK_NL, dequantize_q2_K>; template [[host_name("kernel_get_rows_q3_K")]] kernel get_rows_t kernel_get_rows<block_q3_K, QK_NL, dequantize_q3_K>; template [[host_name("kernel_get_rows_q4_K")]] kernel get_rows_t kernel_get_rows<block_q4_K, QK_NL, dequantize_q4_K>; template [[host_name("kernel_get_rows_q5_K")]] kernel get_rows_t kernel_get_rows<block_q5_K, QK_NL, dequantize_q5_K>; template [[host_name("kernel_get_rows_q6_K")]] kernel get_rows_t kernel_get_rows<block_q6_K, QK_NL, dequantize_q6_K>; // // matrix-matrix multiplication // typedef void (mat_mm_t)( device const uchar * src0, device const uchar * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne02, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, threadgroup uchar *, uint3, uint, uint); template [[host_name("kernel_mul_mm_f32_f32")]] kernel mat_mm_t kernel_mul_mm<float4x4, 1, dequantize_f32>; template [[host_name("kernel_mul_mm_f16_f32")]] kernel mat_mm_t kernel_mul_mm<half4x4, 1, dequantize_f16>; template [[host_name("kernel_mul_mm_q4_0_f32")]] kernel mat_mm_t kernel_mul_mm<block_q4_0, 2, dequantize_q4_0>; template [[host_name("kernel_mul_mm_q4_1_f32")]] kernel mat_mm_t kernel_mul_mm<block_q4_1, 2, dequantize_q4_1>; template [[host_name("kernel_mul_mm_q5_0_f32")]] kernel mat_mm_t kernel_mul_mm<block_q5_0, 2, dequantize_q5_0>; template [[host_name("kernel_mul_mm_q5_1_f32")]] kernel mat_mm_t kernel_mul_mm<block_q5_1, 2, dequantize_q5_1>; template [[host_name("kernel_mul_mm_q8_0_f32")]] kernel mat_mm_t kernel_mul_mm<block_q8_0, 2, dequantize_q8_0>; template [[host_name("kernel_mul_mm_q2_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q2_K, QK_NL, dequantize_q2_K>; template [[host_name("kernel_mul_mm_q3_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q3_K, QK_NL, dequantize_q3_K>; template 
[[host_name("kernel_mul_mm_q4_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q4_K, QK_NL, dequantize_q4_K>; template [[host_name("kernel_mul_mm_q5_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q5_K, QK_NL, dequantize_q5_K>; template [[host_name("kernel_mul_mm_q6_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q6_K, QK_NL, dequantize_q6_K>; // // indirect matrix-matrix multiplication // typedef void (mat_mm_id_t)( device const uchar * ids, device const uchar * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne02, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const uchar * src00, device const uchar * src01, device const uchar * src02, device const uchar * src03, device const uchar * src04, device const uchar * src05, device const uchar * src06, device const uchar * src07, threadgroup uchar *, uint3, uint, uint); template [[host_name("kernel_mul_mm_id_f32_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<float4x4, 1, dequantize_f32>; template [[host_name("kernel_mul_mm_id_f16_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<half4x4, 1, dequantize_f16>; template [[host_name("kernel_mul_mm_id_q4_0_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q4_0, 2, dequantize_q4_0>; template [[host_name("kernel_mul_mm_id_q4_1_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q4_1, 2, dequantize_q4_1>; template [[host_name("kernel_mul_mm_id_q5_0_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q5_0, 2, dequantize_q5_0>; template [[host_name("kernel_mul_mm_id_q5_1_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q5_1, 2, dequantize_q5_1>; template [[host_name("kernel_mul_mm_id_q8_0_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q8_0, 2, dequantize_q8_0>; template [[host_name("kernel_mul_mm_id_q2_K_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q2_K, QK_NL, dequantize_q2_K>; template [[host_name("kernel_mul_mm_id_q3_K_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q3_K, QK_NL, dequantize_q3_K>; template [[host_name("kernel_mul_mm_id_q4_K_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q4_K, QK_NL, dequantize_q4_K>; template [[host_name("kernel_mul_mm_id_q5_K_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q5_K, QK_NL, dequantize_q5_K>; template [[host_name("kernel_mul_mm_id_q6_K_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q6_K, QK_NL, dequantize_q6_K>; // // matrix-vector multiplication // [[host_name("kernel_mul_mv_id_f32_f32")]] kernel void kernel_mul_mv_id_f32_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint 
tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; kernel_mul_mv_f32_f32_impl( src0[id], src1 + bid*nb11, dst + bid*ne0, ne00, ne01, ne02, nb00, nb01, nb02, ne10, ne11, ne12, nb10, nb11, nb12, ne0, ne1, r2, r3, tgpig, tiisg); } [[host_name("kernel_mul_mv_id_f16_f32")]] kernel void kernel_mul_mv_id_f16_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; kernel_mul_mv_f16_f32_impl( src0[id], src1 + bid*nb11, dst + bid*ne0, ne00, ne01, ne02, nb00, nb01, nb02, ne10, ne11, ne12, nb10, nb11, nb12, ne0, ne1, r2, r3, tgpig, tiisg); } [[host_name("kernel_mul_mv_id_q8_0_f32")]] kernel void kernel_mul_mv_id_q8_0_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; kernel_mul_mv_q8_0_f32_impl( src0[id], (device const float *) (src1 + bid*nb11), dst + bid*ne0, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } [[host_name("kernel_mul_mv_id_q4_0_f32")]] kernel void kernel_mul_mv_id_q4_0_f32( device const char * ids, device const char * src1, device float * dst, constant 
uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; mul_vec_q_n_f32_impl<block_q4_0, N_DST, N_SIMDGROUP, N_SIMDWIDTH>( src0[id], (device const float *) (src1 + bid*nb11), dst + bid*ne0, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } [[host_name("kernel_mul_mv_id_q4_1_f32")]] kernel void kernel_mul_mv_id_q4_1_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; mul_vec_q_n_f32_impl<block_q4_1, N_DST, N_SIMDGROUP, N_SIMDWIDTH>( src0[id], (device const float *) (src1 + bid*nb11), dst + bid*ne0, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } [[host_name("kernel_mul_mv_id_q5_0_f32")]] kernel void kernel_mul_mv_id_q5_0_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device 
const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; mul_vec_q_n_f32_impl<block_q5_0, N_DST, N_SIMDGROUP, N_SIMDWIDTH>( src0[id], (device const float *) (src1 + bid*nb11), dst + bid*ne0, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } [[host_name("kernel_mul_mv_id_q5_1_f32")]] kernel void kernel_mul_mv_id_q5_1_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; mul_vec_q_n_f32_impl<block_q5_1, N_DST, N_SIMDGROUP, N_SIMDWIDTH>( src0[id], (device const float *) (src1 + bid*nb11), dst + bid*ne0, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } [[host_name("kernel_mul_mv_id_q2_K_f32")]] kernel void kernel_mul_mv_id_q2_K_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; kernel_mul_mv_q2_K_f32_impl( src0[id], (device const float *) (src1 + bid*nb11), dst + bid*ne0, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, 
tiisg, sgitg); } [[host_name("kernel_mul_mv_id_q3_K_f32")]] kernel void kernel_mul_mv_id_q3_K_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; kernel_mul_mv_q3_K_f32_impl( src0[id], (device const float *) (src1 + bid*nb11), dst + bid*ne0, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } [[host_name("kernel_mul_mv_id_q4_K_f32")]] kernel void kernel_mul_mv_id_q4_K_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; kernel_mul_mv_q4_K_f32_impl( src0[id], (device const float *) (src1 + bid*nb11), dst + bid*ne0, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } [[host_name("kernel_mul_mv_id_q5_K_f32")]] kernel void kernel_mul_mv_id_q5_K_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const 
char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; kernel_mul_mv_q5_K_f32_impl( src0[id], (device const float *) (src1 + bid*nb11), dst + bid*ne0, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } [[host_name("kernel_mul_mv_id_q6_K_f32")]] kernel void kernel_mul_mv_id_q6_K_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; kernel_mul_mv_q6_K_f32_impl( src0[id], (device const float *) (src1 + bid*nb11), dst + bid*ne0, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); }
candle/candle-metal-kernels/src/quantized.metal/0
{ "file_path": "candle/candle-metal-kernels/src/quantized.metal", "repo_id": "candle", "token_count": 97299 }
36
use crate::benchmarks::{BenchDevice, BenchDeviceHandler}; use candle::{DType, Device, Module, Tensor}; use candle_nn::LayerNorm; use criterion::{black_box, criterion_group, Criterion}; use std::time::Instant; fn run(input: &Tensor, weight: &Tensor, bias: &Tensor) { let _ = LayerNorm::new(weight.clone(), bias.clone(), 1e-5).forward(input); } const B: usize = 1; const M: usize = 1024; const K: usize = 1024; fn run_layer_norm_benchmark(c: &mut Criterion, device: &Device, dtype: DType, name: &str) { let elements = B * M * K; let weight = Tensor::arange(0.0, elements as f32, device) .unwrap() .to_dtype(dtype) .unwrap(); let bias = weight.ones_like().unwrap(); let input = weight.ones_like().unwrap(); let mut group = c.benchmark_group(device.bench_name(name)); group.bench_function("iter", move |b| { b.iter_custom(|iters| { let start = Instant::now(); for _i in 0..iters { run(black_box(&input), black_box(&weight), black_box(&bias)); } device.sync().unwrap(); start.elapsed() }) }); group.finish(); } fn criterion_benchmark(c: &mut Criterion) { let device = BenchDeviceHandler::new().unwrap(); for d in device.devices { run_layer_norm_benchmark(c, &d, DType::F32, "layer_norm_f32"); run_layer_norm_benchmark(c, &d, DType::BF16, "layer_norm_bf16"); run_layer_norm_benchmark(c, &d, DType::F16, "layer_norm_f16"); } } criterion_group!(benches, criterion_benchmark);
candle/candle-nn/benches/benchmarks/layer_norm.rs/0
{ "file_path": "candle/candle-nn/benches/benchmarks/layer_norm.rs", "repo_id": "candle", "token_count": 676 }
37
use candle::{Result, Tensor};

/// The negative log likelihood loss.
///
/// Arguments
///
/// * [inp]: The input tensor of dimensions `N, C` where `N` is the batch size and `C` the number
/// of categories. This is expected to contain log probabilities.
/// * [target]: The ground truth labels as a tensor of u32 of dimension `N`.
///
/// The resulting tensor is a scalar containing the average value over the batch.
pub fn nll(inp: &Tensor, target: &Tensor) -> Result<Tensor> {
    let b_sz = match target.dims() {
        &[b_sz] => b_sz,
        dims => candle::bail!("the target tensor should have a single dimension ({dims:?})"),
    };
    match inp.dims() {
        &[inp_b_sz, _] => {
            if inp_b_sz != b_sz {
                candle::bail!("batch size mismatch between inp ({inp_b_sz}) and target ({b_sz})")
            }
        }
        dims => candle::bail!("the input tensor should have two dimensions ({dims:?})"),
    }
    inp.gather(&target.unsqueeze(1)?, 1)?
        .sum_all()?
        .affine(-1f64 / b_sz as f64, 0.)
}

/// The cross-entropy loss.
///
/// Arguments
///
/// * [inp]: The input tensor of dimensions `N, C` where `N` is the batch size and `C` the number
/// of categories. This is expected to contain raw logits.
/// * [target]: The ground truth labels as a tensor of u32 of dimension `N`.
///
/// The resulting tensor is a scalar containing the average value over the batch.
pub fn cross_entropy(inp: &Tensor, target: &Tensor) -> Result<Tensor> {
    if inp.rank() != 2 {
        candle::bail!("cross_entropy expects an input tensor of rank 2")
    }
    let inp = crate::ops::log_softmax(inp, 1)?;
    nll(&inp, target)
}

/// The mean squared error loss.
pub fn mse(inp: &Tensor, target: &Tensor) -> Result<Tensor> {
    (inp - target)?.sqr()?.mean_all()
}

/// The binary cross-entropy with logit loss.
///
/// Arguments
///
/// * [inp]: The input tensor of dimensions `N, C` where `N` is the batch size and `C` the number
/// of categories. This is expected to contain raw logits.
/// * [target]: The ground truth labels as a tensor of dimensions `N, C` where `N` is the batch size
/// and `C` the number of categories.
///
/// The resulting tensor is a scalar containing the average value over the batch.
pub fn binary_cross_entropy_with_logit(inp: &Tensor, target: &Tensor) -> Result<Tensor> {
    let inp = crate::ops::sigmoid(inp)?;
    let left_side = target * inp.log()?;
    let right_side = (target.affine(-1., 1.))? * inp.affine(-1., 1.)?.log()?;
    let loss = left_side? + right_side?;
    let loss = loss?.neg()?.mean_all()?;
    Ok(loss)
}
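// Minimal usage sketch for the loss helpers above (illustrative only, assuming a CPU
// device and small hand-written shapes). `cross_entropy` takes raw logits while `nll`
// takes log probabilities, so the two calls below compute the same average loss.
#[allow(dead_code)]
fn loss_usage_sketch() -> Result<Tensor> {
    let dev = candle::Device::Cpu;
    // Raw logits for 4 samples over 3 classes.
    let logits = Tensor::new(
        &[[1f32, 0., -1.], [0., 2., 0.], [0., 0., 1.], [3., 0., 0.]],
        &dev,
    )?;
    // Ground truth class indices, one u32 value per sample.
    let target = Tensor::new(&[0u32, 1, 2, 0], &dev)?;
    let ce = cross_entropy(&logits, &target)?;
    let _same_value = nll(&crate::ops::log_softmax(&logits, 1)?, &target)?;
    Ok(ce)
}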
candle/candle-nn/src/loss.rs/0
{ "file_path": "candle/candle-nn/src/loss.rs", "repo_id": "candle", "token_count": 1040 }
38
[package]
name = "candle-onnx"
version = "0.6.1"
edition = "2021"
description = "ONNX support for Candle"
repository = "https://github.com/huggingface/candle"
keywords = ["blas", "tensor", "machine-learning"]
categories = ["science"]
license = "MIT OR Apache-2.0"

[dependencies]
candle = { path = "../candle-core", package = "candle-core", version = "0.6.1" }
candle-nn = { path = "../candle-nn", version = "0.6.1" }
prost = "0.12.1"

[build-dependencies]
prost-build = "0.12.1"

[dev-dependencies]
anyhow = { version = "1", features = ["backtrace"] }
clap = { version = "4.2.4", features = ["derive"] }
candle/candle-onnx/Cargo.toml/0
{ "file_path": "candle/candle-onnx/Cargo.toml", "repo_id": "candle", "token_count": 242 }
39
# Generated content DO NOT EDIT
from .. import functional

avg_pool2d = functional.avg_pool2d
gelu = functional.gelu
max_pool2d = functional.max_pool2d
relu = functional.relu
silu = functional.silu
softmax = functional.softmax
tanh = functional.tanh
candle/candle-pyo3/py_src/candle/functional/__init__.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/functional/__init__.py", "repo_id": "candle", "token_count": 84 }
40
# Generated content DO NOT EDIT
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Sequence
from os import PathLike
from candle.typing import _ArrayLike, Device, Scalar, Index, Shape
from candle import Tensor, DType, QTensor

@staticmethod
def cuda_is_available() -> bool:
    """
    Returns true if the 'cuda' backend is available.
    """
    pass

@staticmethod
def get_num_threads() -> int:
    """
    Returns the number of threads used by candle.
    """
    pass

@staticmethod
def has_accelerate() -> bool:
    """
    Returns true if candle was compiled with 'accelerate' support.
    """
    pass

@staticmethod
def has_mkl() -> bool:
    """
    Returns true if candle was compiled with MKL support.
    """
    pass

@staticmethod
def load_ggml(
    path: Union[str, PathLike], device: Optional[Device] = None
) -> Tuple[Dict[str, QTensor], Dict[str, Any], List[str]]:
    """
    Loads a GGML file. Returns a tuple of three objects: a dictionary mapping tensor names to tensors, a dictionary mapping hyperparameter names to hyperparameter values, and a vocabulary.
    """
    pass

@staticmethod
def load_gguf(
    path: Union[str, PathLike], device: Optional[Device] = None
) -> Tuple[Dict[str, QTensor], Dict[str, Any]]:
    """
    Loads a GGUF file. Returns a tuple of two dictionaries: the first maps tensor names to tensors, and the second maps metadata keys to metadata values.
    """
    pass

@staticmethod
def load_safetensors(path: Union[str, PathLike]) -> Dict[str, Tensor]:
    """
    Loads a safetensors file. Returns a dictionary mapping tensor names to tensors.
    """
    pass

@staticmethod
def save_gguf(path: Union[str, PathLike], tensors: Dict[str, QTensor], metadata: Dict[str, Any]):
    """
    Saves quantized tensors and metadata to a GGUF file.
    """
    pass

@staticmethod
def save_safetensors(path: Union[str, PathLike], tensors: Dict[str, Tensor]) -> None:
    """
    Saves a dictionary of tensors to a safetensors file.
    """
    pass
candle/candle-pyo3/py_src/candle/utils/__init__.pyi/0
{ "file_path": "candle/candle-pyo3/py_src/candle/utils/__init__.pyi", "repo_id": "candle", "token_count": 712 }
41
import candle from candle import Tensor, QTensor from candle.utils import load_safetensors, save_gguf, load_gguf, save_safetensors from pathlib import Path TEST_DIR = Path(__file__).parent.parent / "_workdir" TEST_DIR.mkdir(exist_ok=True) def test_can_roundtrip_safetensors(): tensors = { "a": candle.randn((16, 256)), "b": candle.randn((16, 16)), } file = str(TEST_DIR / "test.safetensors") save_safetensors(file, tensors) loaded_tensors = load_safetensors(file) assert set(tensors.keys()) == set(loaded_tensors.keys()) for key in tensors.keys(): assert tensors[key].values() == loaded_tensors[key].values(), "Values are not equal" assert tensors[key].shape == loaded_tensors[key].shape, "Shapes are not equal" assert str(tensors[key].dtype) == str(loaded_tensors[key].dtype), "Dtypes are not equal" def test_can_roundtrip_gguf(): metadata = { "a": 1, "b": "foo", "c": [1, 2, 3], "d": [[1, 2], [3, 4]], } tensors = { "a": candle.randn((16, 256)).quantize("q4_0"), "b": candle.randn((16, 16)).quantize("f32"), } file = str(TEST_DIR / "test.gguf") save_gguf(file, tensors, metadata) loaded_tensors, loaded_metadata = load_gguf(file) assert set(metadata.keys()) == set(loaded_metadata.keys()) for key in metadata.keys(): assert metadata[key] == loaded_metadata[key] assert set(tensors.keys()) == set(loaded_tensors.keys()) for key in tensors.keys(): assert tensors[key].dequantize().values() == loaded_tensors[key].dequantize().values(), "Values are not equal" assert tensors[key].shape == loaded_tensors[key].shape, "Shapes are not equal" assert str(tensors[key].ggml_dtype) == str(loaded_tensors[key].ggml_dtype), "Dtypes are not equal"
candle/candle-pyo3/tests/native/test_utils.py/0
{ "file_path": "candle/candle-pyo3/tests/native/test_utils.py", "repo_id": "candle", "token_count": 774 }
42
use candle::Result; use candle_nn::{batch_norm, Conv2dConfig, Module, VarBuilder}; #[allow(clippy::many_single_char_names)] fn conv2d_same( i: usize, o: usize, k: usize, c: Conv2dConfig, vb: VarBuilder, ) -> Result<impl Module> { let conv2d = candle_nn::conv2d(i, o, k, c, vb)?; let s = c.stride; let module = candle_nn::func(move |xs| { let ih = xs.dim(2)?; let iw = xs.dim(3)?; let oh = (ih + s - 1) / s; let ow = (iw + s - 1) / s; let pad_h = usize::max((oh - 1) * s + k - ih, 0); let pad_w = usize::max((ow - 1) * s + k - iw, 0); if pad_h > 0 || pad_w > 0 { xs.pad_with_zeros(3, pad_w / 2, pad_w - pad_w / 2)? .pad_with_zeros(2, pad_h / 2, pad_h - pad_h / 2)? .apply(&conv2d) } else { xs.apply(&conv2d) } }); Ok(module) } fn block(dim: usize, kernel_size: usize, vb: VarBuilder) -> Result<impl Module> { let conv2d_cfg = Conv2dConfig { groups: dim, ..Default::default() }; let vb_fn = vb.pp(0).pp("fn"); let conv1 = conv2d_same(dim, dim, kernel_size, conv2d_cfg, vb_fn.pp(0))?; let bn1 = batch_norm(dim, 1e-5, vb_fn.pp(2))?; let conv2 = candle_nn::conv2d(dim, dim, 1, Default::default(), vb.pp(1))?; let bn2 = batch_norm(dim, 1e-5, vb.pp(3))?; Ok(candle_nn::func(move |xs| { let ys = xs.apply(&conv1)?.gelu_erf()?.apply_t(&bn1, false)?; (xs + ys)?.apply(&conv2)?.gelu_erf()?.apply_t(&bn2, false) })) } fn convmixer( nclasses: usize, dim: usize, depth: usize, kernel_size: usize, patch_size: usize, vb: VarBuilder, ) -> Result<candle_nn::Func<'static>> { let conv2d_cfg = Conv2dConfig { stride: patch_size, ..Default::default() }; let conv1 = candle_nn::conv2d(3, dim, patch_size, conv2d_cfg, vb.pp(0))?; let bn1 = batch_norm(dim, 1e-5, vb.pp(2))?; let blocks: Vec<_> = (0..depth) .map(|index| block(dim, kernel_size, vb.pp(3 + index))) .collect::<Result<Vec<_>>>()?; let fc = candle_nn::linear(dim, nclasses, vb.pp(25))?; Ok(candle_nn::func(move |xs| { let mut xs = xs.apply(&conv1)?.gelu_erf()?.apply_t(&bn1, false)?; for block in blocks.iter() { xs = xs.apply(block)? } // This performs the adaptive average pooling with a target size of (1, 1). xs.mean(3)?.mean(2)?.apply(&fc) })) } pub fn c1536_20(nclasses: usize, vb: VarBuilder) -> Result<candle_nn::Func<'static>> { convmixer(nclasses, 1536, 20, 9, 7, vb) } pub fn c1024_20(nclasses: usize, vb: VarBuilder) -> Result<candle_nn::Func<'static>> { convmixer(nclasses, 1024, 20, 9, 14, vb) }
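// Worked example of the "same" padding arithmetic used by `conv2d_same` above
// (illustrative only, with hand-picked sizes): for an input height of 28, kernel
// size 9 and stride 1, the output height is ceil(28 / 1) = 28, so the total padding
// is (28 - 1) * 1 + 9 - 28 = 8 rows, split as 4 before and 4 after. The helper below
// restates that computation, written to avoid unsigned underflow.
#[allow(dead_code)]
fn same_padding(size: usize, k: usize, stride: usize) -> (usize, usize) {
    let out = (size + stride - 1) / stride;
    let pad = usize::max((out - 1) * stride + k, size) - size;
    (pad / 2, pad - pad / 2)
}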
candle/candle-transformers/src/models/convmixer.rs/0
{ "file_path": "candle/candle-transformers/src/models/convmixer.rs", "repo_id": "candle", "token_count": 1413 }
43
use candle::{Device, Result, Tensor}; pub fn get_noise( num_samples: usize, height: usize, width: usize, device: &Device, ) -> Result<Tensor> { let height = (height + 15) / 16 * 2; let width = (width + 15) / 16 * 2; Tensor::randn(0f32, 1., (num_samples, 16, height, width), device) } #[derive(Debug, Clone)] pub struct State { pub img: Tensor, pub img_ids: Tensor, pub txt: Tensor, pub txt_ids: Tensor, pub vec: Tensor, } impl State { pub fn new(t5_emb: &Tensor, clip_emb: &Tensor, img: &Tensor) -> Result<Self> { let dtype = img.dtype(); let (bs, c, h, w) = img.dims4()?; let dev = img.device(); let img = img.reshape((bs, c, h / 2, 2, w / 2, 2))?; // (b, c, h, ph, w, pw) let img = img.permute((0, 2, 4, 1, 3, 5))?; // (b, h, w, c, ph, pw) let img = img.reshape((bs, h / 2 * w / 2, c * 4))?; let img_ids = Tensor::stack( &[ Tensor::full(0u32, (h / 2, w / 2), dev)?, Tensor::arange(0u32, h as u32 / 2, dev)? .reshape(((), 1))? .broadcast_as((h / 2, w / 2))?, Tensor::arange(0u32, w as u32 / 2, dev)? .reshape((1, ()))? .broadcast_as((h / 2, w / 2))?, ], 2, )? .to_dtype(dtype)?; let img_ids = img_ids.reshape((1, h / 2 * w / 2, 3))?; let img_ids = img_ids.repeat((bs, 1, 1))?; let txt = t5_emb.repeat(bs)?; let txt_ids = Tensor::zeros((bs, txt.dim(1)?, 3), dtype, dev)?; let vec = clip_emb.repeat(bs)?; Ok(Self { img, img_ids, txt, txt_ids, vec, }) } } fn time_shift(mu: f64, sigma: f64, t: f64) -> f64 { let e = mu.exp(); e / (e + (1. / t - 1.).powf(sigma)) } /// `shift` is a triple `(image_seq_len, base_shift, max_shift)`. pub fn get_schedule(num_steps: usize, shift: Option<(usize, f64, f64)>) -> Vec<f64> { let timesteps: Vec<f64> = (0..=num_steps) .map(|v| v as f64 / num_steps as f64) .rev() .collect(); match shift { None => timesteps, Some((image_seq_len, y1, y2)) => { let (x1, x2) = (256., 4096.); let m = (y2 - y1) / (x2 - x1); let b = y1 - m * x1; let mu = m * image_seq_len as f64 + b; timesteps .into_iter() .map(|v| time_shift(mu, 1., v)) .collect() } } } pub fn unpack(xs: &Tensor, height: usize, width: usize) -> Result<Tensor> { let (b, _h_w, c_ph_pw) = xs.dims3()?; let height = (height + 15) / 16; let width = (width + 15) / 16; xs.reshape((b, height, width, c_ph_pw / 4, 2, 2))? // (b, h, w, c, ph, pw) .permute((0, 3, 1, 4, 2, 5))? // (b, c, h, ph, w, pw) .reshape((b, c_ph_pw / 4, height * 2, width * 2)) } #[allow(clippy::too_many_arguments)] pub fn denoise( model: &super::model::Flux, img: &Tensor, img_ids: &Tensor, txt: &Tensor, txt_ids: &Tensor, vec_: &Tensor, timesteps: &[f64], guidance: f64, ) -> Result<Tensor> { let b_sz = img.dim(0)?; let dev = img.device(); let guidance = Tensor::full(guidance as f32, b_sz, dev)?; let mut img = img.clone(); for window in timesteps.windows(2) { let (t_curr, t_prev) = match window { [a, b] => (a, b), _ => continue, }; let t_vec = Tensor::full(*t_curr as f32, b_sz, dev)?; let pred = model.forward(&img, img_ids, txt, txt_ids, &t_vec, vec_, Some(&guidance))?; img = (img + pred * (t_prev - t_curr))? } Ok(img) }
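// Minimal usage sketch for `get_schedule` (illustrative numbers only). Without a
// shift the schedule is `num_steps + 1` evenly spaced values going from 1.0 down to
// 0.0; with a shift each value is warped through `time_shift`, with `mu` interpolated
// from the image sequence length. The (256, 0.5, 1.15) triple below is just an
// example (image_seq_len, base_shift, max_shift) setting.
#[allow(dead_code)]
fn schedule_sketch() {
    // 4 denoising steps without shift: [1.0, 0.75, 0.5, 0.25, 0.0].
    let plain = get_schedule(4, None);
    // Same number of steps, warped by the shift parameters.
    let shifted = get_schedule(4, Some((256, 0.5, 1.15)));
    assert_eq!(plain.len(), shifted.len());
}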
candle/candle-transformers/src/models/flux/sampling.rs/0
{ "file_path": "candle/candle-transformers/src/models/flux/sampling.rs", "repo_id": "candle", "token_count": 2061 }
44
use crate::models::with_tracing::{linear, Embedding as E, Linear}; /// MixFormer model. /// https://huggingface.co/microsoft/phi-1_5 /// https://arxiv.org/abs/2309.05463 use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use candle_nn::{Activation, VarBuilder}; use serde::Deserialize; const MAX_SEQ_LEN: usize = 4096; // https://huggingface.co/microsoft/phi-1_5/blob/d38e6f954ec29b96fe2cf033937dad64e279b5d9/configuration_mixformer_sequential.py #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { pub(crate) vocab_size: usize, pub(crate) n_positions: usize, pub(crate) n_embd: usize, pub(crate) n_layer: usize, pub(crate) n_inner: Option<usize>, pub(crate) n_head: usize, pub(crate) rotary_dim: usize, pub(crate) activation_function: Activation, pub(crate) layer_norm_epsilon: f64, pub(crate) tie_word_embeddings: bool, pub(crate) pad_vocab_size_multiple: usize, } impl Config { pub fn v1() -> Self { Self { vocab_size: 50304, n_positions: 2048, n_embd: 1024, n_layer: 20, n_inner: None, n_head: 16, rotary_dim: usize::min(32, 1024 / 16), activation_function: Activation::Gelu, layer_norm_epsilon: 1e-5, tie_word_embeddings: false, pad_vocab_size_multiple: 64, } } pub fn v1_5() -> Self { Self { vocab_size: 51200, n_positions: 2048, n_embd: 2048, n_layer: 24, n_inner: None, n_head: 32, rotary_dim: usize::min(32, 2048 / 32), activation_function: Activation::Gelu, layer_norm_epsilon: 1e-5, tie_word_embeddings: false, pad_vocab_size_multiple: 64, } } pub fn v2() -> Self { Self { vocab_size: 51200, n_positions: 2048, n_embd: 2560, n_layer: 32, n_inner: None, n_head: 32, rotary_dim: usize::min(32, 2560 / 32), activation_function: Activation::Gelu, layer_norm_epsilon: 1e-5, tie_word_embeddings: false, pad_vocab_size_multiple: 64, } } // https://huggingface.co/teknium/Puffin-Phi-v2/blob/main/config.json pub fn puffin_phi_v2() -> Self { Self { vocab_size: 50304, n_positions: 2048, n_embd: 2048, n_layer: 24, n_inner: None, n_head: 32, rotary_dim: usize::min(32, 2048 / 32), activation_function: Activation::Gelu, layer_norm_epsilon: 1e-5, tie_word_embeddings: false, pad_vocab_size_multiple: 64, } } // https://huggingface.co/teknium/Phi-Hermes-1.3B/blob/main/config.json pub fn phi_hermes_1_3b() -> Self { Self { vocab_size: 50304, n_positions: 2048, n_embd: 2048, n_layer: 24, n_inner: None, n_head: 32, rotary_dim: usize::min(32, 2048 / 32), activation_function: Activation::NewGelu, layer_norm_epsilon: 1e-5, tie_word_embeddings: false, pad_vocab_size_multiple: 64, } } } #[derive(Debug, Clone)] struct Embedding { wte: E, } impl Embedding { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let wte = E::new(cfg.vocab_size, cfg.n_embd, vb.pp("wte"))?; Ok(Self { wte }) } } impl Module for Embedding { fn forward(&self, xs: &Tensor) -> Result<Tensor> { self.wte.forward(xs) } } fn get_mask(size: usize, dtype: DType, device: &Device) -> Result<Tensor> { let mask: Vec<_> = (0..size) .flat_map(|i| (0..size).map(move |j| if j > i { f32::NEG_INFINITY } else { 0. })) .collect(); Tensor::from_slice(&mask, (size, size), device)?.to_dtype(dtype) } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } impl RotaryEmbedding { fn new(dim: usize, max_seq_len: usize, dtype: DType, dev: &Device) -> Result<Self> { let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / 10000f32.powf(i as f32 / dim as f32)) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? 
.to_dtype(DType::F32)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; Ok(Self { sin: freqs.sin()?.to_dtype(dtype)?, cos: freqs.cos()?.to_dtype(dtype)?, }) } fn apply_rotary_emb_qkv( &self, qkv: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor, Tensor)> { let (_b_size, seqlen, three, _, _headdim) = qkv.dims5()?; if three != 3 { candle::bail!("unexpected shape for qkv {:?}", qkv.shape()) } let (_rotary_seqlen, rotary_dim) = self.cos.dims2()?; let rotary_dim = rotary_dim * 2; let q_rot = qkv.i((.., .., 0, .., ..rotary_dim))?.contiguous()?; let q_pass = qkv.i((.., .., 0, .., rotary_dim..))?; let k_rot = qkv.i((.., .., 1, .., ..rotary_dim))?.contiguous()?; let k_pass = qkv.i((.., .., 1, .., rotary_dim..))?; let c = self.cos.narrow(0, seqlen_offset, seqlen)?; let s = self.sin.narrow(0, seqlen_offset, seqlen)?; let q_rot = candle_nn::rotary_emb::rope_thd(&q_rot, &c, &s)?; let k_rot = candle_nn::rotary_emb::rope_thd(&k_rot, &c, &s)?; let q = Tensor::cat(&[&q_rot, &q_pass], D::Minus1)?; let k = Tensor::cat(&[&k_rot, &k_pass], D::Minus1)?; let v = qkv.i((.., .., 2))?; Ok((q, k, v)) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { fc1: Linear, fc2: Linear, act: Activation, span: tracing::Span, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let n_inner = cfg.n_inner.unwrap_or(4 * cfg.n_embd); let fc1 = linear(cfg.n_embd, n_inner, vb.pp("fc1"))?; let fc2 = linear(n_inner, cfg.n_embd, vb.pp("fc2"))?; Ok(Self { fc1, fc2, act: cfg.activation_function, span: tracing::span!(tracing::Level::TRACE, "mlp"), }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); xs.apply(&self.fc1)?.apply(&self.act)?.apply(&self.fc2) } } #[derive(Debug, Clone)] struct CausalLMHead { ln: candle_nn::LayerNorm, linear: Linear, } impl CausalLMHead { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let ln = candle_nn::layer_norm(cfg.n_embd, cfg.layer_norm_epsilon, vb.pp("ln"))?; let linear = linear(cfg.n_embd, cfg.vocab_size, vb.pp("linear"))?; Ok(Self { ln, linear }) } } impl Module for CausalLMHead { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.ln)? .apply(&self.linear)? .to_dtype(DType::F32) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MHA { wqkv: Linear, out_proj: Linear, rotary_emb: RotaryEmbedding, kv_cache: Option<(Tensor, Tensor)>, head_dim: usize, softmax_scale: f64, span: tracing::Span, span_rope: tracing::Span, span_mask: tracing::Span, span_softmax: tracing::Span, } impl MHA { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let head_dim = cfg.n_embd / cfg.n_head; let op_size = cfg.n_embd; let wqkv = linear(cfg.n_embd, 3 * op_size, vb.pp("Wqkv"))?; let out_proj = linear(op_size, cfg.n_embd, vb.pp("out_proj"))?; let rotary_emb = RotaryEmbedding::new(cfg.rotary_dim, MAX_SEQ_LEN, vb.dtype(), vb.device())?; let softmax_scale = 1f64 / (head_dim as f64).sqrt(); Ok(Self { wqkv, out_proj, head_dim, kv_cache: None, rotary_emb, softmax_scale, span: tracing::span!(tracing::Level::TRACE, "mha"), span_rope: tracing::span!(tracing::Level::TRACE, "rope"), span_mask: tracing::span!(tracing::Level::TRACE, "mask"), span_softmax: tracing::span!(tracing::Level::TRACE, "softmax"), }) } fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let (b_size, seq_len, _n_embd) = xs.dims3()?; let qkv = self .wqkv .forward(xs)? 
.reshape((b_size, seq_len, 3, (), self.head_dim))?; let seqlen_offset = match &self.kv_cache { None => 0, Some((prev_k, _)) => prev_k.dim(1)?, }; // In the python implementation, a single tensor is returned with the third axis of size 3. let (q, k, v) = { let _enter = self.span_rope.enter(); self.rotary_emb.apply_rotary_emb_qkv(&qkv, seqlen_offset)? }; let (k, v) = match &self.kv_cache { None => (k, v), Some((prev_k, prev_v)) => { let k = Tensor::cat(&[prev_k, &k], 1)?; let v = Tensor::cat(&[prev_v, &v], 1)?; (k, v) } }; self.kv_cache = Some((k.clone(), v.clone())); // scores = torch.einsum('bthd,bshd->bhts', q, k * softmax_scale) let q = q.transpose(1, 2)?.flatten_to(1)?; // b*h, t, d let k = k.transpose(1, 2)?.flatten_to(1)?; // b*h, s, d let v = v.transpose(1, 2)?.flatten_to(1)?; // b*h, s, d let attn_weights = (q.matmul(&k.t()?)? * self.softmax_scale)?; // b*h, t, s // causal_mask = torch.triu(torch.full((seqlen_q, seqlen_k), -10000.0, device=scores.device), 1) // scores = scores + causal_mask.to(dtype=scores.dtype) let attn_weights = match mask { None => attn_weights, Some(mask) => { let _enter = self.span_mask.enter(); attn_weights.broadcast_add(mask)? } }; let attn_weights = { let _enter = self.span_softmax.enter(); candle_nn::ops::softmax_last_dim(&attn_weights)? }; // output = torch.einsum('bhts,bshd->bthd', attention_drop, v) // attn_weights: b*h,t,s, v: b*h,s,d let attn_output = attn_weights.matmul(&v)?; // b*h,t,d let attn_output = attn_output .reshape((b_size, (), seq_len, self.head_dim))? .transpose(1, 2)? .flatten_from(D::Minus2)?; attn_output.apply(&self.out_proj) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct ParallelBlock { ln: candle_nn::LayerNorm, mixer: MHA, mlp: MLP, span: tracing::Span, } impl ParallelBlock { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let ln = candle_nn::layer_norm(cfg.n_embd, cfg.layer_norm_epsilon, vb.pp("ln"))?; let mixer = MHA::new(cfg, vb.pp("mixer"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; Ok(Self { ln, mixer, mlp, span: tracing::span!(tracing::Level::TRACE, "block"), }) } fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let residual = xs; let xs = xs.apply(&self.ln)?; let attn_outputs = self.mixer.forward(&xs, mask)?; let feed_forward_hidden_states = self.mlp.forward(&xs)?; attn_outputs + feed_forward_hidden_states + residual } fn clear_kv_cache(&mut self) { self.mixer.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct MixFormerSequentialForCausalLM { embedding: Embedding, blocks: Vec<ParallelBlock>, head: CausalLMHead, span: tracing::Span, } impl MixFormerSequentialForCausalLM { pub fn new_v2(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_head = vb.pp("lm_head"); let vb = vb.pp("transformer"); let embedding = Embedding::new(cfg, vb.pp("embd"))?; let mut blocks = Vec::new(); for i in 0..cfg.n_layer { let block = ParallelBlock::new(cfg, vb.pp("h").pp(i))?; blocks.push(block) } let head = CausalLMHead::new(cfg, vb_head)?; Ok(Self { embedding, blocks, head, span: tracing::span!(tracing::Level::TRACE, "mixformer"), }) } pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb = vb.pp("layers"); let embedding = Embedding::new(cfg, vb.pp(0))?; let mut blocks = Vec::new(); for i in 0..cfg.n_layer { let block = ParallelBlock::new(cfg, vb.pp(i + 1))?; blocks.push(block) } let head = CausalLMHead::new(cfg, vb.pp(cfg.n_layer + 1))?; Ok(Self { embedding, blocks, head, span: tracing::span!(tracing::Level::TRACE, 
"mixformer"), }) } pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (_b_size, seq_len) = xs.dims2()?; let mut xs = xs.apply(&self.embedding)?; let mask = if seq_len <= 1 { None } else { Some(get_mask(seq_len, xs.dtype(), xs.device())?) }; for block in self.blocks.iter_mut() { xs = block.forward(&xs, mask.as_ref())? } xs.narrow(1, seq_len - 1, 1)?.apply(&self.head)?.squeeze(1) } pub fn forward_with_img( &mut self, bos_token: &Tensor, xs: &Tensor, img_embeds: &Tensor, ) -> Result<Tensor> { let _enter = self.span.enter(); let xs = xs.apply(&self.embedding)?; let bos_token = bos_token.apply(&self.embedding)?; // Python implementation sequence order is <bos token embedding><img embedding><rest of text embedding> // https://github.com/vikhyat/moondream/blob/a9d788a20d1543fb1479edc54106e88cff7759d3/moondream/moondream.py#L43-L56 let mut xs = Tensor::cat(&[bos_token, img_embeds.clone(), xs], 1)?; let (_b_size, seq_len, _embds) = xs.dims3()?; let mask = Some(get_mask(seq_len, xs.dtype(), xs.device())?); for block in self.blocks.iter_mut() { xs = block.forward(&xs, mask.as_ref())? } let xs = xs .narrow(1, seq_len - 1, 1)? .apply(&self.head)? .squeeze(1)?; Ok(xs) } pub fn clear_kv_cache(&mut self) { self.blocks.iter_mut().for_each(|b| b.clear_kv_cache()) } }
candle/candle-transformers/src/models/mixformer.rs/0
{ "file_path": "candle/candle-transformers/src/models/mixformer.rs", "repo_id": "candle", "token_count": 7964 }
45
use crate::generation::LogitsProcessor; use crate::models::t5; use candle::{IndexOp, Result, Tensor}; use candle_nn::{layer_norm, linear_b as linear, Activation, LayerNorm, Linear, VarBuilder}; #[derive(serde::Deserialize, Debug, Clone)] pub struct DecoderConfig { pub vocab_size: usize, pub max_position_embeddings: usize, pub num_hidden_layers: usize, pub ffn_dim: usize, pub num_attention_heads: usize, pub num_key_value_heads: Option<usize>, pub num_cross_attention_key_value_heads: Option<usize>, pub activation_function: Activation, pub hidden_size: usize, pub scale_embedding: bool, pub num_codebooks: usize, pub pad_token_id: usize, pub bos_token_id: usize, pub eos_token_id: usize, pub tie_word_embeddings: bool, pub rope_embeddings: bool, pub rope_theta: f64, } #[derive(serde::Deserialize, Debug, Clone)] pub struct Config { pub decoder_start_token_id: u32, pub pad_token_id: u32, pub decoder: DecoderConfig, pub text_encoder: t5::Config, pub vocab_size: usize, pub audio_encoder: crate::models::dac::Config, } #[derive(Debug, Clone)] pub struct Attention { k_proj: Linear, v_proj: Linear, q_proj: Linear, out_proj: Linear, is_causal: bool, kv_cache: Option<(Tensor, Tensor)>, scaling: f64, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, } impl Attention { fn new( num_kv_heads: usize, is_causal: bool, cfg: &DecoderConfig, vb: VarBuilder, ) -> Result<Self> { if cfg.rope_embeddings { candle::bail!("rope embeddings are not supported"); } let embed_dim = cfg.hidden_size; let head_dim = embed_dim / cfg.num_attention_heads; let kv_out_dim = num_kv_heads * head_dim; let k_proj = linear(embed_dim, kv_out_dim, false, vb.pp("k_proj"))?; let v_proj = linear(embed_dim, kv_out_dim, false, vb.pp("v_proj"))?; let q_proj = linear(embed_dim, embed_dim, false, vb.pp("q_proj"))?; let out_proj = linear(embed_dim, embed_dim, false, vb.pp("out_proj"))?; Ok(Self { k_proj, v_proj, q_proj, out_proj, is_causal, kv_cache: None, scaling: (head_dim as f64).powf(-0.5), num_heads: cfg.num_attention_heads, num_kv_heads, num_kv_groups: cfg.num_attention_heads / num_kv_heads, head_dim, }) } fn forward( &mut self, xs: &Tensor, key_value_states: Option<&Tensor>, attention_mask: Option<&Tensor>, ) -> Result<Tensor> { let (b_sz, tgt_len, _) = xs.dims3()?; let query_states = (xs.apply(&self.q_proj)? * self.scaling)? .reshape((b_sz, tgt_len, self.num_heads, self.head_dim))? .transpose(1, 2)? .contiguous()?; let key_states = match key_value_states { Some(states) => states.apply(&self.k_proj)?, None => xs.apply(&self.k_proj)?, }; let key_states = key_states .reshape((b_sz, (), self.num_kv_heads, self.head_dim))? .transpose(1, 2)? .contiguous()?; let value_states = match key_value_states { Some(states) => states.apply(&self.v_proj)?, None => xs.apply(&self.v_proj)?, }; let value_states = value_states .reshape((b_sz, (), self.num_kv_heads, self.head_dim))? .transpose(1, 2)? 
.contiguous()?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; if self.is_causal { self.kv_cache = Some((key_states.clone(), value_states.clone())); } let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?; let attn_weights = query_states.matmul(&key_states.transpose(2, 3)?)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; let attn_output = attn_weights.matmul(&value_states)?; attn_output .transpose(1, 2)? .reshape((b_sz, tgt_len, ()))? .apply(&self.out_proj) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] pub struct DecoderLayer { self_attn: Attention, self_attn_layer_norm: LayerNorm, encoder_attn: Attention, encoder_attn_layer_norm: LayerNorm, fc1: Linear, fc2: Linear, final_layer_norm: LayerNorm, activation: Activation, } impl DecoderLayer { fn new(cfg: &DecoderConfig, vb: VarBuilder) -> Result<Self> { let kv_heads = cfg.num_key_value_heads.unwrap_or(cfg.num_attention_heads); let kv_heads_cross = cfg.num_cross_attention_key_value_heads.unwrap_or(kv_heads); let self_attn = Attention::new(kv_heads, true, cfg, vb.pp("self_attn"))?; let encoder_attn = Attention::new(kv_heads_cross, false, cfg, vb.pp("encoder_attn"))?; let self_attn_layer_norm = layer_norm(cfg.hidden_size, 1e-5, vb.pp("self_attn_layer_norm"))?; let encoder_attn_layer_norm = layer_norm(cfg.hidden_size, 1e-5, vb.pp("encoder_attn_layer_norm"))?; let fc1 = linear(cfg.hidden_size, cfg.ffn_dim, false, vb.pp("fc1"))?; let fc2 = linear(cfg.ffn_dim, cfg.hidden_size, false, vb.pp("fc2"))?; let final_layer_norm = layer_norm(cfg.hidden_size, 1e-5, vb.pp("final_layer_norm"))?; Ok(Self { self_attn, self_attn_layer_norm, encoder_attn, encoder_attn_layer_norm, fc1, fc2, final_layer_norm, activation: cfg.activation_function, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, encoder_xs: &Tensor, encoder_attention_mask: Option<&Tensor>, ) -> Result<Tensor> { // Self attention let residual = xs; let xs = xs.apply(&self.self_attn_layer_norm)?; let xs = self.self_attn.forward(&xs, None, attention_mask)?; let xs = (residual + xs)?; // Cross attention let residual = &xs; let xs = xs.apply(&self.encoder_attn_layer_norm)?; let xs = self .encoder_attn .forward(&xs, Some(encoder_xs), encoder_attention_mask)?; let xs = (residual + xs)?; // Fully connected let residual = &xs; let xs = xs .apply(&self.final_layer_norm)? .apply(&self.fc1)? .apply(&self.activation)? 
.apply(&self.fc2)?; residual + xs } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache(); self.encoder_attn.clear_kv_cache(); } } #[derive(Debug, Clone)] pub struct Decoder { embed_tokens: Vec<candle_nn::Embedding>, embed_positions: Tensor, layers: Vec<DecoderLayer>, layer_norm: LayerNorm, num_codebooks: usize, hidden_size: usize, lm_heads: Vec<Linear>, dtype: candle::DType, } impl Decoder { pub fn new(cfg: &DecoderConfig, vb: VarBuilder) -> Result<Self> { let vb_d = vb.pp("model.decoder"); let mut embed_tokens = Vec::with_capacity(cfg.num_codebooks); let vb_e = vb_d.pp("embed_tokens"); for embed_idx in 0..cfg.num_codebooks { let e = candle_nn::embedding(cfg.vocab_size + 1, cfg.hidden_size, vb_e.pp(embed_idx))?; embed_tokens.push(e) } let embed_positions = vb_d.get( (cfg.max_position_embeddings, cfg.hidden_size), "embed_positions.weights", )?; let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_d.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let layer_norm = layer_norm(cfg.hidden_size, 1e-5, vb_d.pp("layer_norm"))?; let mut lm_heads = Vec::with_capacity(cfg.num_codebooks); let vb_l = vb.pp("lm_heads"); for lm_idx in 0..cfg.num_codebooks { let lm_head = linear(cfg.hidden_size, cfg.vocab_size, false, vb_l.pp(lm_idx))?; lm_heads.push(lm_head) } Ok(Self { embed_tokens, embed_positions, layers, layer_norm, num_codebooks: cfg.num_codebooks, lm_heads, hidden_size: cfg.hidden_size, dtype: vb.dtype(), }) } pub fn forward( &mut self, input_ids: &Tensor, prompt_hidden_states: Option<&Tensor>, attention_mask: Option<&Tensor>, encoder_xs: &Tensor, encoder_attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Vec<Tensor>> { let (b_sz, num_codebooks, seq_len) = input_ids.dims3()?; if num_codebooks != self.num_codebooks { candle::bail!("unexpected num codebooks in input {:?}", input_ids.shape()) } let mut inputs_embeds = Tensor::zeros( (b_sz, seq_len, self.hidden_size), self.dtype, input_ids.device(), )?; for (idx, embs) in self.embed_tokens.iter().enumerate() { let e = input_ids.i((.., idx))?.apply(embs)?; inputs_embeds = (inputs_embeds + e)? 
} let inputs_embeds = match prompt_hidden_states { None => inputs_embeds, Some(pis) => Tensor::cat(&[pis, &inputs_embeds], 1)?, }; let embed_positions = self .embed_positions .i(seqlen_offset..seqlen_offset + inputs_embeds.dim(1)?)?; let mut xs = (inputs_embeds + embed_positions.unsqueeze(0))?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask, encoder_xs, encoder_attention_mask)?; } let xs = xs.apply(&self.layer_norm)?; let mut lm_logits = Vec::with_capacity(self.num_codebooks); for lm_head in self.lm_heads.iter() { let logits = xs.apply(lm_head)?; lm_logits.push(logits) } Ok(lm_logits) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } } #[derive(Debug, Clone)] pub struct Model { pub embed_prompts: candle_nn::Embedding, pub enc_to_dec_proj: Option<Linear>, pub decoder: Decoder, pub text_encoder: t5::T5EncoderModel, pub decoder_start_token_id: u32, pub pad_token_id: u32, pub audio_encoder: crate::models::dac::Model, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let text_encoder = t5::T5EncoderModel::load(vb.pp("text_encoder"), &cfg.text_encoder)?; let decoder = Decoder::new(&cfg.decoder, vb.pp("decoder"))?; let embed_prompts = candle_nn::embedding( cfg.vocab_size, cfg.decoder.hidden_size, vb.pp("embed_prompts"), )?; let enc_to_dec_proj = if cfg.text_encoder.d_model != cfg.decoder.hidden_size { let proj = linear( cfg.text_encoder.d_model, cfg.decoder.hidden_size, true, vb.pp("enc_to_dec_proj"), )?; Some(proj) } else { None }; let audio_encoder = crate::models::dac::Model::new(&cfg.audio_encoder, vb.pp("audio_encoder"))?; Ok(Self { decoder, text_encoder, embed_prompts, enc_to_dec_proj, decoder_start_token_id: cfg.decoder_start_token_id, pad_token_id: cfg.pad_token_id, audio_encoder, }) } /// Note that the returned tensor uses the CPU device. pub fn generate( &mut self, prompt_tokens: &Tensor, description_tokens: &Tensor, mut lp: LogitsProcessor, max_steps: usize, ) -> Result<Tensor> { self.decoder.clear_kv_cache(); self.text_encoder.clear_kv_cache(); let encoded = self.text_encoder.forward(description_tokens)?; let encoded = match self.enc_to_dec_proj.as_ref() { None => encoded, Some(proj) => encoded.apply(proj)?, }; let prompt_hidden_states = prompt_tokens.apply(&self.embed_prompts)?; let num_codebooks = self.decoder.num_codebooks; let mut audio_tokens = vec![self.decoder_start_token_id; num_codebooks]; let mut all_audio_tokens = vec![vec![]; num_codebooks]; let prompt_len = prompt_hidden_states.dim(1)?; for step in 0..max_steps { let input_ids = Tensor::from_slice( audio_tokens.as_slice(), (1, num_codebooks, 1), prompt_tokens.device(), )?; let (prompt_hidden_states, pos) = if step == 0 { (Some(&prompt_hidden_states), 0) } else { (None, step + prompt_len) }; let causal_mask = if pos == 0 { self.prepare_causal_mask(prompt_len + 1, prompt_len + 1, input_ids.device())? } else { self.prepare_causal_mask(1, pos + 1, input_ids.device())? }; let logits = self.decoder.forward( &input_ids, prompt_hidden_states, Some(&causal_mask), &encoded, None, pos, )?; for (logit_idx, logit) in logits.iter().enumerate() { if logit_idx > step { break; } if audio_tokens[logit_idx] != self.pad_token_id { let logit = logit.i((0, logit.dim(1)? 
- 1))?; let token = lp.sample(&logit)?; audio_tokens[logit_idx] = token } } if audio_tokens.iter().all(|v| v == &self.pad_token_id) { break; } for (cb_idx, &token) in audio_tokens.iter().enumerate() { if token != self.decoder_start_token_id && token != self.pad_token_id { all_audio_tokens[cb_idx].push(token) } } } let min_len = all_audio_tokens.iter().map(|v| v.len()).min().unwrap_or(0); all_audio_tokens.iter_mut().for_each(|v| { v.resize(min_len, 0); }); let all_audio_tokens = Tensor::new(all_audio_tokens, &candle::Device::Cpu)?; Ok(all_audio_tokens) } fn prepare_causal_mask( &self, q_len: usize, kv_len: usize, device: &candle::Device, ) -> Result<Tensor> { let mask: Vec<_> = (0..q_len) .flat_map(|i| { (0..kv_len).map(move |j| { if i + kv_len < j + q_len { f32::NEG_INFINITY } else { 0. } }) }) .collect(); Tensor::from_slice(&mask, (q_len, kv_len), device) } }
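A minimal sketch of how a caller might drive the `generate` entry point above, assuming `model`, `prompt_tokens` (the text to be spoken) and `description_tokens` (the voice description) have already been built and moved to the right device; the seed, temperature, top-p and step count are illustrative values rather than anything prescribed by the code above:

```rust
use candle::{Result, Tensor};
use candle_transformers::generation::LogitsProcessor;
use candle_transformers::models::parler_tts::Model;

// Runs text-to-speech generation and returns the audio codebook tokens (on the CPU device,
// as noted on `generate` above). Decoding those tokens into a waveform via the DAC audio
// model is a separate step and is not shown here.
fn synthesize(model: &mut Model, prompt_tokens: &Tensor, description_tokens: &Tensor) -> Result<Tensor> {
    let lp = LogitsProcessor::new(42, Some(0.7), Some(0.9));
    model.generate(prompt_tokens, description_tokens, lp, 512)
}
```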
candle/candle-transformers/src/models/parler_tts.rs/0
{ "file_path": "candle/candle-transformers/src/models/parler_tts.rs", "repo_id": "candle", "token_count": 8397 }
46
use crate::quantized_nn::{linear_b as linear, Embedding, Linear}; pub use crate::quantized_var_builder::VarBuilder; use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use std::sync::Arc; use crate::models::recurrent_gemma::{Config, Rglru, RmsNorm, RotaryEmbedding, TemporalBlockType}; fn rms_norm(size: usize, eps: f64, vb: VarBuilder) -> Result<RmsNorm> { let weight = vb.get(size, "weight")?.dequantize(vb.device())?; Ok(RmsNorm::from_weight(weight, eps)) } #[derive(Debug, Clone)] struct Mlp { gate_proj: Linear, up_proj: Linear, down_proj: Linear, act_fn: candle_nn::Activation, } impl Mlp { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let h = cfg.hidden_size; let intermediate_size = cfg.intermediate_size / 2; let gate_proj = linear(h, intermediate_size, true, vb.pp("gate_proj"))?; let up_proj = linear(h, intermediate_size, true, vb.pp("up_proj"))?; let down_proj = linear(intermediate_size, h, true, vb.pp("down_proj"))?; Ok(Self { gate_proj, up_proj, down_proj, act_fn: cfg.hidden_activation, }) } } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let gate = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?; (gate * xs.apply(&self.up_proj))?.apply(&self.down_proj) } } fn rglru(cfg: &Config, vb: VarBuilder) -> Result<Rglru> { let h = cfg.hidden_size; let lru_width = cfg.lru_width.unwrap_or(h); let n_heads = cfg.num_attention_heads; let block_width = lru_width / n_heads; let recurrent_param = vb.get((lru_width,), "recurrent_param")?; let input_gate_weight = vb.get((n_heads, block_width, block_width), "input_gate_weight")?; let input_gate_bias = vb.get((n_heads, block_width), "input_gate_bias")?; let recurrent_gate_weight = vb.get((n_heads, block_width, block_width), "recurrent_gate_weight")?; let recurrent_gate_bias = vb.get((n_heads, block_width), "recurrent_gate_bias")?; Ok(Rglru { recurrent_param: recurrent_param.dequantize(vb.device())?, input_gate_bias: input_gate_bias.dequantize(vb.device())?, input_gate_weight: input_gate_weight.dequantize(vb.device())?, recurrent_gate_bias: recurrent_gate_bias.dequantize(vb.device())?, recurrent_gate_weight: recurrent_gate_weight.dequantize(vb.device())?, block_width, n_heads, recurrent_states: None, }) } #[derive(Debug, Clone)] struct RecurrentBlock { linear_y: Linear, linear_x: Linear, linear_out: Linear, conv_1d: candle_nn::Conv1d, conv1d_state: Option<Tensor>, conv1d_width: usize, rg_lru: Rglru, act_fn: candle_nn::Activation, } impl RecurrentBlock { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let h = cfg.hidden_size; let lru_width = cfg.lru_width.unwrap_or(h); let linear_y = linear(h, lru_width, true, vb.pp("linear_y"))?; let linear_x = linear(h, lru_width, true, vb.pp("linear_x"))?; let linear_out = linear(lru_width, h, true, vb.pp("linear_out"))?; let conv_1d = { let ws = vb .get((lru_width, 1, cfg.conv1d_width), "conv_1d.weight")? 
.dequantize(vb.device())?; let bs = vb.get(lru_width, "conv_1d.bias")?.dequantize(vb.device())?; let config = candle_nn::Conv1dConfig { groups: lru_width, padding: cfg.conv1d_width - 1, ..Default::default() }; candle_nn::Conv1d::new(ws, Some(bs), config) }; let rg_lru = rglru(cfg, vb.pp("rg_lru"))?; Ok(Self { linear_y, linear_x, linear_out, conv_1d, conv1d_state: None, conv1d_width: cfg.conv1d_width, rg_lru, act_fn: cfg.hidden_activation, }) } pub fn forward(&mut self, xs: &Tensor, pos: usize) -> Result<Tensor> { let (_b_sz, seq_len, _) = xs.dims3()?; let y_branch = xs.apply(&self.linear_y)?.apply(&self.act_fn)?; let x_branch = xs.apply(&self.linear_x)?.transpose(1, 2)?; let x_branch = if pos == 0 { let x_len = x_branch.dim(D::Minus1)?; let pad = self.conv1d_width as i64 - x_len as i64 - 1; let padded = match pad.cmp(&0) { std::cmp::Ordering::Equal => x_branch.clone(), std::cmp::Ordering::Less => { let rev_pad = (-pad) as usize; x_branch.narrow(D::Minus1, rev_pad, x_len - rev_pad)? } std::cmp::Ordering::Greater => { x_branch.pad_with_zeros(D::Minus1, pad as usize, 0)? } }; self.conv1d_state = Some(padded); x_branch .apply(&self.conv_1d)? .narrow(D::Minus1, 0, seq_len)? } else { let conv_state = match self.conv1d_state.as_ref() { None => candle::bail!("empty cache despite pos > 0"), Some(s) => Tensor::cat(&[s, &x_branch], D::Minus1)?, }; let w = self.conv_1d.weight().i((.., 0, ..))?; let x_branch = conv_state.broadcast_mul(&w)?.sum(D::Minus1)?; let x_branch = match self.conv_1d.bias() { None => x_branch, Some(b) => x_branch.broadcast_add(b)?, }; let x_branch = x_branch.unsqueeze(D::Minus1)?; self.conv1d_state = Some(conv_state.i((.., .., 1..))?); x_branch }; let x_branch = x_branch.transpose(1, 2)?; let x_branch = self.rg_lru.forward(&x_branch, pos)?; (x_branch * y_branch)?.apply(&self.linear_out) } } #[derive(Debug, Clone)] struct SdpaAttention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, n_heads: usize, n_kv_heads: usize, head_dim: usize, hidden_size: usize, kv_cache: Option<(Tensor, Tensor)>, rotary_emb: Arc<RotaryEmbedding>, } impl SdpaAttention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let h = cfg.hidden_size; let n_heads = cfg.num_attention_heads; let n_kv_heads = cfg.num_key_value_heads; let hd = cfg.head_dim; let q_proj = linear(h, n_heads * hd, cfg.attention_bias, vb.pp("q_proj"))?; let k_proj = linear(h, n_kv_heads * hd, cfg.attention_bias, vb.pp("k_proj"))?; let v_proj = linear(h, n_kv_heads * hd, cfg.attention_bias, vb.pp("v_proj"))?; let o_proj = linear(n_heads * hd, h, true, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, n_heads, n_kv_heads, head_dim: hd, hidden_size: h, kv_cache: None, rotary_emb, }) } fn repeat_kv(&self, x: Tensor) -> Result<Tensor> { let n_rep = self.n_heads / self.n_kv_heads; crate::utils::repeat_kv(x, n_rep) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, pos: usize, ) -> Result<Tensor> { let (bsz, q_len, _) = xs.dims3()?; let query_states = xs.apply(&self.q_proj)?; let key_states = xs.apply(&self.k_proj)?; let value_states = xs.apply(&self.v_proj)?; let query_states = query_states .reshape((bsz, q_len, self.n_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((bsz, q_len, self.n_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((bsz, q_len, self.n_kv_heads, self.head_dim))? 
.transpose(1, 2)?; let query_states = query_states.chunk(2, D::Minus1)?; let key_states = key_states.chunk(2, D::Minus1)?; let (query_rot, key_rot) = self.rotary_emb .apply_rotary_emb_qkv(&query_states[0], &key_states[0], pos)?; let query_states = Tensor::cat(&[&query_rot, &query_states[1]], D::Minus1)?.contiguous()?; let key_states = Tensor::cat(&[&key_rot, &key_states[1]], D::Minus1)?.contiguous()?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = self.repeat_kv(key_states)?; let value_states = self.repeat_kv(value_states)?; let xs = { let att = (query_states.matmul(&key_states.t()?)? / (self.head_dim as f64).sqrt())?; let att = if q_len == 1 { att } else { match attention_mask { None => att, Some(mask) => att.broadcast_add(mask)?, } }; let att = candle_nn::ops::softmax_last_dim(&att)?; att.matmul(&value_states.contiguous()?)? }; let xs = xs .transpose(1, 2)? .reshape((bsz, q_len, self.hidden_size))?; self.o_proj.forward(&xs) } } #[derive(Debug, Clone)] enum TemporalBlock { Recurrent(RecurrentBlock), Attention(SdpaAttention), } impl TemporalBlock { fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, pos: usize, ) -> Result<Tensor> { match self { Self::Recurrent(b) => b.forward(xs, pos), Self::Attention(b) => b.forward(xs, attention_mask, pos), } } } #[derive(Debug, Clone)] struct DecoderLayer { temporal_pre_norm: RmsNorm, channel_pre_norm: RmsNorm, temporal_block: TemporalBlock, mlp_block: Mlp, } impl DecoderLayer { fn new( block_idx: usize, rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder, ) -> Result<Self> { let h = cfg.hidden_size; let temporal_pre_norm = rms_norm(h, cfg.rms_norm_eps, vb.pp("temporal_pre_norm"))?; let channel_pre_norm = rms_norm(h, cfg.rms_norm_eps, vb.pp("channel_pre_norm"))?; let temporal_block = match cfg.block_types[block_idx % cfg.block_types.len()] { TemporalBlockType::Recurrent => { let block = RecurrentBlock::new(cfg, vb.pp("temporal_block"))?; TemporalBlock::Recurrent(block) } TemporalBlockType::Attention => { let block = SdpaAttention::new(rotary_emb, cfg, vb.pp("temporal_block"))?; TemporalBlock::Attention(block) } }; let mlp_block = Mlp::new(cfg, vb.pp("mlp_block"))?; Ok(Self { temporal_pre_norm, channel_pre_norm, temporal_block, mlp_block, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, pos: usize, ) -> Result<Tensor> { let residual = xs; let xs = xs.apply(&self.temporal_pre_norm)?; let xs = self.temporal_block.forward(&xs, attention_mask, pos)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.channel_pre_norm)?.apply(&self.mlp_block)?; xs + residual } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: Embedding, layers: Vec<DecoderLayer>, final_norm: RmsNorm, lm_head: Linear, hidden_size: usize, logits_soft_cap: f64, device: Device, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let embed_tokens = Embedding::new(cfg.vocab_size, cfg.hidden_size, vb.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(DType::F32, cfg, vb.device())?); let vb_b = vb.pp("layers"); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); for idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(idx, rotary_emb.clone(), cfg, 
vb_b.pp(idx))?; layers.push(layer) } let final_norm = rms_norm(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("final_norm"))?; let lm_head = linear( cfg.hidden_size, cfg.vocab_size, false, vb.pp("embed_tokens"), )?; Ok(Self { embed_tokens, layers, final_norm, lm_head, hidden_size: cfg.hidden_size, logits_soft_cap: cfg.logits_soft_cap, device: vb.device().clone(), }) } fn prepare_decoder_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { let mask: Vec<_> = (0..tgt_len) .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. })) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(DType::F32) } pub fn forward(&mut self, xs: &Tensor, pos: usize) -> Result<Tensor> { let (b_size, seq_len) = xs.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(b_size, seq_len, pos)?; Some(mask) }; let xs = xs.apply(&self.embed_tokens)?; let mut xs = (xs * (self.hidden_size as f64).sqrt())?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), pos)?; } let logits = xs .narrow(1, seq_len - 1, 1)? .apply(&self.final_norm)? .apply(&self.lm_head)?; let logits = ((logits / self.logits_soft_cap)?.tanh()? * self.logits_soft_cap)?; Ok(logits) } }
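A minimal sketch of the position bookkeeping that `Model::forward` above expects: the full prompt is fed at offset 0, then one token at a time at increasing offsets. `Model` refers to the type defined above; the `model` binding, the prompt tokens and the (elided) sampling step are assumptions for illustration only:

```rust
use candle::{Device, Result, Tensor};

// Prompt processing followed by single-token decode steps. `pos` is the offset of the first
// token in `xs`, which is what the attention mask and the recurrent-state handling above key on.
fn decode_loop(model: &mut Model, prompt: &[u32], steps: usize, device: &Device) -> Result<Tensor> {
    let input = Tensor::new(prompt, device)?.unsqueeze(0)?; // (1, prompt_len)
    let mut logits = model.forward(&input, 0)?;             // logits for the last prompt position
    let mut pos = prompt.len();
    for _ in 0..steps {
        let next_token = 0u32; // placeholder: sample from `logits` here instead
        let input = Tensor::new(&[next_token], device)?.unsqueeze(0)?; // (1, 1)
        logits = model.forward(&input, pos)?;
        pos += 1;
    }
    Ok(logits)
}
```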
candle/candle-transformers/src/models/quantized_recurrent_gemma.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_recurrent_gemma.rs", "repo_id": "candle", "token_count": 7690 }
47
use candle::{DType, IndexOp, Result, Tensor, D}; use candle_nn::VarBuilder; #[derive(Debug)] struct PositionEmbeddingRandom { positional_encoding_gaussian_matrix: Tensor, } impl PositionEmbeddingRandom { fn new(num_pos_feats: usize, vb: VarBuilder) -> Result<Self> { let positional_encoding_gaussian_matrix = vb.get((2, num_pos_feats), "positional_encoding_gaussian_matrix")?; Ok(Self { positional_encoding_gaussian_matrix, }) } fn pe_encoding(&self, coords: &Tensor) -> Result<Tensor> { let coords = coords.affine(2., -1.)?; let coords = coords.broadcast_matmul(&self.positional_encoding_gaussian_matrix)?; let coords = (coords * (2. * std::f64::consts::PI))?; Tensor::cat(&[coords.sin()?, coords.cos()?], D::Minus1) } fn forward(&self, h: usize, w: usize) -> Result<Tensor> { let device = self.positional_encoding_gaussian_matrix.device(); let x_embed = (Tensor::arange(0u32, w as u32, device)?.to_dtype(DType::F32)? + 0.5)?; let y_embed = (Tensor::arange(0u32, h as u32, device)?.to_dtype(DType::F32)? + 0.5)?; let x_embed = (x_embed / w as f64)? .reshape((1, ()))? .broadcast_as((h, w))?; let y_embed = (y_embed / h as f64)? .reshape(((), 1))? .broadcast_as((h, w))?; let coords = Tensor::stack(&[&x_embed, &y_embed], D::Minus1)?; self.pe_encoding(&coords)?.permute((2, 0, 1)) } fn forward_with_coords( &self, coords_input: &Tensor, image_size: (usize, usize), ) -> Result<Tensor> { let coords0 = (coords_input.narrow(D::Minus1, 0, 1)? / image_size.1 as f64)?; let coords1 = (coords_input.narrow(D::Minus1, 1, 1)? / image_size.0 as f64)?; let c = coords_input.dim(D::Minus1)?; let coords_rest = coords_input.narrow(D::Minus1, 2, c - 2)?; let coords = Tensor::cat(&[&coords0, &coords1, &coords_rest], D::Minus1)?; self.pe_encoding(&coords) } } #[derive(Debug)] pub struct PromptEncoder { pe_layer: PositionEmbeddingRandom, point_embeddings: Vec<candle_nn::Embedding>, not_a_point_embed: candle_nn::Embedding, mask_downscaling_conv1: candle_nn::Conv2d, mask_downscaling_ln1: super::LayerNorm2d, mask_downscaling_conv2: candle_nn::Conv2d, mask_downscaling_ln2: super::LayerNorm2d, mask_downscaling_conv3: candle_nn::Conv2d, no_mask_embed: candle_nn::Embedding, image_embedding_size: (usize, usize), input_image_size: (usize, usize), embed_dim: usize, span: tracing::Span, } impl PromptEncoder { pub fn new( embed_dim: usize, image_embedding_size: (usize, usize), input_image_size: (usize, usize), mask_in_chans: usize, vb: VarBuilder, ) -> Result<Self> { let num_points_embeddings = 4; let pe_layer = PositionEmbeddingRandom::new(embed_dim / 2, vb.pp("pe_layer"))?; let not_a_point_embed = candle_nn::embedding(1, embed_dim, vb.pp("not_a_point_embed"))?; let no_mask_embed = candle_nn::embedding(1, embed_dim, vb.pp("no_mask_embed"))?; let cfg = candle_nn::Conv2dConfig { stride: 2, ..Default::default() }; let mask_downscaling_conv1 = candle_nn::conv2d(1, mask_in_chans / 4, 2, cfg, vb.pp("mask_downscaling.0"))?; let mask_downscaling_conv2 = candle_nn::conv2d( mask_in_chans / 4, mask_in_chans, 2, cfg, vb.pp("mask_downscaling.3"), )?; let mask_downscaling_conv3 = candle_nn::conv2d( mask_in_chans, embed_dim, 1, Default::default(), vb.pp("mask_downscaling.6"), )?; let mask_downscaling_ln1 = super::LayerNorm2d::new(mask_in_chans / 4, 1e-6, vb.pp("mask_downscaling.1"))?; let mask_downscaling_ln2 = super::LayerNorm2d::new(mask_in_chans, 1e-6, vb.pp("mask_downscaling.4"))?; let mut point_embeddings = Vec::with_capacity(num_points_embeddings); let vb_e = vb.pp("point_embeddings"); for i in 0..num_points_embeddings { let emb = 
candle_nn::embedding(1, embed_dim, vb_e.pp(i))?; point_embeddings.push(emb) } let span = tracing::span!(tracing::Level::TRACE, "prompt-encoder"); Ok(Self { pe_layer, point_embeddings, not_a_point_embed, mask_downscaling_conv1, mask_downscaling_ln1, mask_downscaling_conv2, mask_downscaling_ln2, mask_downscaling_conv3, no_mask_embed, image_embedding_size, input_image_size, embed_dim, span, }) } pub fn get_dense_pe(&self) -> Result<Tensor> { self.pe_layer .forward(self.image_embedding_size.0, self.image_embedding_size.1)? .unsqueeze(0) } fn embed_masks(&self, masks: &Tensor) -> Result<Tensor> { masks .apply(&self.mask_downscaling_conv1)? .apply(&self.mask_downscaling_ln1)? .gelu()? .apply(&self.mask_downscaling_conv2)? .apply(&self.mask_downscaling_ln2)? .gelu()? .apply(&self.mask_downscaling_conv3) } fn embed_points(&self, points: &Tensor, labels: &Tensor, pad: bool) -> Result<Tensor> { let points = (points + 0.5)?; let dev = points.device(); let (points, labels) = if pad { let padding_point = Tensor::zeros((points.dim(0)?, 1, 2), DType::F32, dev)?; let padding_label = (Tensor::ones((labels.dim(0)?, 1), DType::F32, dev)? * (-1f64))?; let points = Tensor::cat(&[&points, &padding_point], 1)?; let labels = Tensor::cat(&[labels, &padding_label], 1)?; (points, labels) } else { (points, labels.clone()) }; let point_embedding = self .pe_layer .forward_with_coords(&points, self.input_image_size)?; let labels = labels.unsqueeze(2)?.broadcast_as(point_embedding.shape())?; let zeros = point_embedding.zeros_like()?; let point_embedding = labels.lt(0f32)?.where_cond( &self .not_a_point_embed .embeddings() .broadcast_as(zeros.shape())?, &point_embedding, )?; let labels0 = labels.eq(0f32)?.where_cond( &self.point_embeddings[0] .embeddings() .broadcast_as(zeros.shape())?, &zeros, )?; let point_embedding = (point_embedding + labels0)?; let labels1 = labels.eq(1f32)?.where_cond( &self.point_embeddings[1] .embeddings() .broadcast_as(zeros.shape())?, &zeros, )?; let point_embedding = (point_embedding + labels1)?; Ok(point_embedding) } fn embed_boxes(&self, boxes: &Tensor) -> Result<Tensor> { let boxes = (boxes + 0.5)?; let coords = boxes.reshape(((), 2, 2))?; let corner_embedding = self .pe_layer .forward_with_coords(&coords, self.input_image_size)?; let ce1 = corner_embedding.i((.., 0))?; let ce2 = corner_embedding.i((.., 1))?; let ce1 = (ce1 + self.point_embeddings[2].embeddings())?; let ce2 = (ce2 + self.point_embeddings[3].embeddings())?; Tensor::cat(&[&ce1, &ce2], 1) } pub fn forward( &self, points: Option<(&Tensor, &Tensor)>, boxes: Option<&Tensor>, masks: Option<&Tensor>, ) -> Result<(Tensor, Tensor)> { let _enter = self.span.enter(); let se_points = match points { Some((coords, labels)) => Some(self.embed_points(coords, labels, boxes.is_none())?), None => None, }; let se_boxes = match boxes { Some(boxes) => Some(self.embed_boxes(boxes)?), None => None, }; let sparse_embeddings = match (se_points, se_boxes) { (Some(se_points), Some(se_boxes)) => Tensor::cat(&[se_points, se_boxes], 1)?, (Some(se_points), None) => se_points, (None, Some(se_boxes)) => se_boxes, (None, None) => { let dev = self.no_mask_embed.embeddings().device(); Tensor::zeros((1, 0, self.embed_dim), DType::F32, dev)? } }; let dense_embeddings = match masks { None => { let emb = self.no_mask_embed.embeddings(); emb.reshape((1, (), 1, 1))?.expand(( 1, emb.elem_count(), self.image_embedding_size.0, self.image_embedding_size.1, ))? } Some(masks) => self.embed_masks(masks)?, }; Ok((sparse_embeddings, dense_embeddings)) } }
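A minimal sketch of encoding a single foreground point with the `PromptEncoder` above, assuming an `encoder` built via `PromptEncoder::new` and pixel coordinates already mapped to the resized input image; label 1 marks a positive point (0 would be background):

```rust
use candle::{DType, Device, Result, Tensor};

// Because no box prompt is passed, `forward` pads the point list with the "not a point"
// entry internally (see `embed_points` above). Returns the (sparse, dense) embeddings.
fn encode_point(encoder: &PromptEncoder, x: f32, y: f32, device: &Device) -> Result<(Tensor, Tensor)> {
    let coords = Tensor::new(&[[[x, y]]], device)?;          // (1, 1, 2)
    let labels = Tensor::ones((1, 1), DType::F32, device)?;  // (1, 1), all positive points
    encoder.forward(Some((&coords, &labels)), None, None)
}
```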
candle/candle-transformers/src/models/segment_anything/prompt_encoder.rs/0
{ "file_path": "candle/candle-transformers/src/models/segment_anything/prompt_encoder.rs", "repo_id": "candle", "token_count": 4745 }
48
#![allow(dead_code)] //! # Variational Auto-Encoder (VAE) Models. //! //! Auto-encoder models compress their input to a usually smaller latent space //! before expanding it back to its original shape. This results in the latent values //! compressing the original information. use super::unet_2d_blocks::{ DownEncoderBlock2D, DownEncoderBlock2DConfig, UNetMidBlock2D, UNetMidBlock2DConfig, UpDecoderBlock2D, UpDecoderBlock2DConfig, }; use candle::{Result, Tensor}; use candle_nn as nn; use candle_nn::Module; #[derive(Debug, Clone)] struct EncoderConfig { // down_block_types: DownEncoderBlock2D block_out_channels: Vec<usize>, layers_per_block: usize, norm_num_groups: usize, double_z: bool, } impl Default for EncoderConfig { fn default() -> Self { Self { block_out_channels: vec![64], layers_per_block: 2, norm_num_groups: 32, double_z: true, } } } #[derive(Debug)] struct Encoder { conv_in: nn::Conv2d, down_blocks: Vec<DownEncoderBlock2D>, mid_block: UNetMidBlock2D, conv_norm_out: nn::GroupNorm, conv_out: nn::Conv2d, #[allow(dead_code)] config: EncoderConfig, } impl Encoder { fn new( vs: nn::VarBuilder, in_channels: usize, out_channels: usize, config: EncoderConfig, ) -> Result<Self> { let conv_cfg = nn::Conv2dConfig { padding: 1, ..Default::default() }; let conv_in = nn::conv2d( in_channels, config.block_out_channels[0], 3, conv_cfg, vs.pp("conv_in"), )?; let mut down_blocks = vec![]; let vs_down_blocks = vs.pp("down_blocks"); for index in 0..config.block_out_channels.len() { let out_channels = config.block_out_channels[index]; let in_channels = if index > 0 { config.block_out_channels[index - 1] } else { config.block_out_channels[0] }; let is_final = index + 1 == config.block_out_channels.len(); let cfg = DownEncoderBlock2DConfig { num_layers: config.layers_per_block, resnet_eps: 1e-6, resnet_groups: config.norm_num_groups, add_downsample: !is_final, downsample_padding: 0, ..Default::default() }; let down_block = DownEncoderBlock2D::new( vs_down_blocks.pp(index.to_string()), in_channels, out_channels, cfg, )?; down_blocks.push(down_block) } let last_block_out_channels = *config.block_out_channels.last().unwrap(); let mid_cfg = UNetMidBlock2DConfig { resnet_eps: 1e-6, output_scale_factor: 1., attn_num_head_channels: None, resnet_groups: Some(config.norm_num_groups), ..Default::default() }; let mid_block = UNetMidBlock2D::new(vs.pp("mid_block"), last_block_out_channels, None, mid_cfg)?; let conv_norm_out = nn::group_norm( config.norm_num_groups, last_block_out_channels, 1e-6, vs.pp("conv_norm_out"), )?; let conv_out_channels = if config.double_z { 2 * out_channels } else { out_channels }; let conv_cfg = nn::Conv2dConfig { padding: 1, ..Default::default() }; let conv_out = nn::conv2d( last_block_out_channels, conv_out_channels, 3, conv_cfg, vs.pp("conv_out"), )?; Ok(Self { conv_in, down_blocks, mid_block, conv_norm_out, conv_out, config, }) } } impl Encoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = xs.apply(&self.conv_in)?; for down_block in self.down_blocks.iter() { xs = xs.apply(down_block)? } let xs = self .mid_block .forward(&xs, None)? 
.apply(&self.conv_norm_out)?; nn::ops::silu(&xs)?.apply(&self.conv_out) } } #[derive(Debug, Clone)] struct DecoderConfig { // up_block_types: UpDecoderBlock2D block_out_channels: Vec<usize>, layers_per_block: usize, norm_num_groups: usize, } impl Default for DecoderConfig { fn default() -> Self { Self { block_out_channels: vec![64], layers_per_block: 2, norm_num_groups: 32, } } } #[derive(Debug)] struct Decoder { conv_in: nn::Conv2d, up_blocks: Vec<UpDecoderBlock2D>, mid_block: UNetMidBlock2D, conv_norm_out: nn::GroupNorm, conv_out: nn::Conv2d, #[allow(dead_code)] config: DecoderConfig, } impl Decoder { fn new( vs: nn::VarBuilder, in_channels: usize, out_channels: usize, config: DecoderConfig, ) -> Result<Self> { let n_block_out_channels = config.block_out_channels.len(); let last_block_out_channels = *config.block_out_channels.last().unwrap(); let conv_cfg = nn::Conv2dConfig { padding: 1, ..Default::default() }; let conv_in = nn::conv2d( in_channels, last_block_out_channels, 3, conv_cfg, vs.pp("conv_in"), )?; let mid_cfg = UNetMidBlock2DConfig { resnet_eps: 1e-6, output_scale_factor: 1., attn_num_head_channels: None, resnet_groups: Some(config.norm_num_groups), ..Default::default() }; let mid_block = UNetMidBlock2D::new(vs.pp("mid_block"), last_block_out_channels, None, mid_cfg)?; let mut up_blocks = vec![]; let vs_up_blocks = vs.pp("up_blocks"); let reversed_block_out_channels: Vec<_> = config.block_out_channels.iter().copied().rev().collect(); for index in 0..n_block_out_channels { let out_channels = reversed_block_out_channels[index]; let in_channels = if index > 0 { reversed_block_out_channels[index - 1] } else { reversed_block_out_channels[0] }; let is_final = index + 1 == n_block_out_channels; let cfg = UpDecoderBlock2DConfig { num_layers: config.layers_per_block + 1, resnet_eps: 1e-6, resnet_groups: config.norm_num_groups, add_upsample: !is_final, ..Default::default() }; let up_block = UpDecoderBlock2D::new( vs_up_blocks.pp(index.to_string()), in_channels, out_channels, cfg, )?; up_blocks.push(up_block) } let conv_norm_out = nn::group_norm( config.norm_num_groups, config.block_out_channels[0], 1e-6, vs.pp("conv_norm_out"), )?; let conv_cfg = nn::Conv2dConfig { padding: 1, ..Default::default() }; let conv_out = nn::conv2d( config.block_out_channels[0], out_channels, 3, conv_cfg, vs.pp("conv_out"), )?; Ok(Self { conv_in, up_blocks, mid_block, conv_norm_out, conv_out, config, }) } } impl Decoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = self.mid_block.forward(&self.conv_in.forward(xs)?, None)?; for up_block in self.up_blocks.iter() { xs = up_block.forward(&xs)? 
} let xs = self.conv_norm_out.forward(&xs)?; let xs = nn::ops::silu(&xs)?; self.conv_out.forward(&xs) } } #[derive(Debug, Clone)] pub struct AutoEncoderKLConfig { pub block_out_channels: Vec<usize>, pub layers_per_block: usize, pub latent_channels: usize, pub norm_num_groups: usize, } impl Default for AutoEncoderKLConfig { fn default() -> Self { Self { block_out_channels: vec![64], layers_per_block: 1, latent_channels: 4, norm_num_groups: 32, } } } pub struct DiagonalGaussianDistribution { mean: Tensor, std: Tensor, } impl DiagonalGaussianDistribution { pub fn new(parameters: &Tensor) -> Result<Self> { let mut parameters = parameters.chunk(2, 1)?.into_iter(); let mean = parameters.next().unwrap(); let logvar = parameters.next().unwrap(); let std = (logvar * 0.5)?.exp()?; Ok(DiagonalGaussianDistribution { mean, std }) } pub fn sample(&self) -> Result<Tensor> { let sample = self.mean.randn_like(0., 1.); &self.mean + &self.std * sample } } // https://github.com/huggingface/diffusers/blob/970e30606c2944e3286f56e8eb6d3dc6d1eb85f7/src/diffusers/models/vae.py#L485 // This implementation is specific to the config used in stable-diffusion-v1-5 // https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/vae/config.json #[derive(Debug)] pub struct AutoEncoderKL { encoder: Encoder, decoder: Decoder, quant_conv: nn::Conv2d, post_quant_conv: nn::Conv2d, pub config: AutoEncoderKLConfig, } impl AutoEncoderKL { pub fn new( vs: nn::VarBuilder, in_channels: usize, out_channels: usize, config: AutoEncoderKLConfig, ) -> Result<Self> { let latent_channels = config.latent_channels; let encoder_cfg = EncoderConfig { block_out_channels: config.block_out_channels.clone(), layers_per_block: config.layers_per_block, norm_num_groups: config.norm_num_groups, double_z: true, }; let encoder = Encoder::new(vs.pp("encoder"), in_channels, latent_channels, encoder_cfg)?; let decoder_cfg = DecoderConfig { block_out_channels: config.block_out_channels.clone(), layers_per_block: config.layers_per_block, norm_num_groups: config.norm_num_groups, }; let decoder = Decoder::new(vs.pp("decoder"), latent_channels, out_channels, decoder_cfg)?; let conv_cfg = Default::default(); let quant_conv = nn::conv2d( 2 * latent_channels, 2 * latent_channels, 1, conv_cfg, vs.pp("quant_conv"), )?; let post_quant_conv = nn::conv2d( latent_channels, latent_channels, 1, conv_cfg, vs.pp("post_quant_conv"), )?; Ok(Self { encoder, decoder, quant_conv, post_quant_conv, config, }) } /// Returns the distribution in the latent space. pub fn encode(&self, xs: &Tensor) -> Result<DiagonalGaussianDistribution> { let xs = self.encoder.forward(xs)?; let parameters = self.quant_conv.forward(&xs)?; DiagonalGaussianDistribution::new(&parameters) } /// Takes as input some sampled values. pub fn decode(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.post_quant_conv.forward(xs)?; self.decoder.forward(&xs) } }
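A minimal round-trip sketch for the `AutoEncoderKL` above, assuming a `vae` built via `AutoEncoderKL::new` and an image batch tensor; the extra latent scaling factor applied by stable-diffusion pipelines is intentionally omitted here:

```rust
use candle::{Result, Tensor};

// Encode an image batch into the diagonal Gaussian posterior, draw a sample, then decode
// the sampled latents back to image space.
fn roundtrip(vae: &AutoEncoderKL, images: &Tensor) -> Result<Tensor> {
    let dist = vae.encode(images)?; // DiagonalGaussianDistribution over the latent space
    let latents = dist.sample()?;   // mean + std * unit Gaussian noise
    vae.decode(&latents)
}
```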
candle/candle-transformers/src/models/stable_diffusion/vae.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/vae.rs", "repo_id": "candle", "token_count": 6006 }
49
pub mod attention_processor; pub mod common; pub mod ddpm; pub mod diffnext; pub mod paella_vq; pub mod prior;
candle/candle-transformers/src/models/wuerstchen/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/wuerstchen/mod.rs", "repo_id": "candle", "token_count": 38 }
50
<html> <head> <meta content="text/html;charset=utf-8" http-equiv="Content-Type" /> <title>Candle Bert</title> </head> <body></body> </html> <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } </style> <script src="https://cdn.tailwindcss.com"></script> <script type="module" src="./code.js"></script> <script type="module"> import { hcl } from "https://cdn.skypack.dev/d3-color@3"; import { interpolateReds } from "https://cdn.skypack.dev/d3-scale-chromatic@3"; import { scaleLinear } from "https://cdn.skypack.dev/d3-scale@4"; import { getModelInfo, getEmbeddings, getWikiText, cosineSimilarity, } from "./utils.js"; const bertWorker = new Worker("./bertWorker.js", { type: "module", }); const inputContainerEL = document.querySelector("#input-container"); const textAreaEl = document.querySelector("#input-area"); const outputAreaEl = document.querySelector("#output-area"); const formEl = document.querySelector("#form"); const searchInputEl = document.querySelector("#search-input"); const formWikiEl = document.querySelector("#form-wiki"); const searchWikiEl = document.querySelector("#search-wiki"); const outputStatusEl = document.querySelector("#output-status"); const modelSelectEl = document.querySelector("#model"); const sentencesRegex = /(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<![A-Z]\.)(?<=\.|\?)\s/gm; let sentenceEmbeddings = []; let currInputText = ""; let isCalculating = false; function toggleTextArea(state) { if (state) { textAreaEl.hidden = false; textAreaEl.focus(); } else { textAreaEl.hidden = true; } } inputContainerEL.addEventListener("focus", (e) => { toggleTextArea(true); }); textAreaEl.addEventListener("blur", (e) => { toggleTextArea(false); }); textAreaEl.addEventListener("focusout", (e) => { toggleTextArea(false); if (currInputText === textAreaEl.value || isCalculating) return; populateOutputArea(textAreaEl.value); calculateEmbeddings(textAreaEl.value); }); modelSelectEl.addEventListener("change", (e) => { if (currInputText === "" || isCalculating) return; populateOutputArea(textAreaEl.value); calculateEmbeddings(textAreaEl.value); }); function populateOutputArea(text) { currInputText = text; const sentences = text.split(sentencesRegex); outputAreaEl.innerHTML = ""; for (const [id, sentence] of sentences.entries()) { const sentenceEl = document.createElement("span"); sentenceEl.id = `sentence-${id}`; sentenceEl.innerText = sentence + " "; outputAreaEl.appendChild(sentenceEl); } } formEl.addEventListener("submit", async (e) => { e.preventDefault(); if (isCalculating || currInputText === "") return; toggleInputs(true); const modelID = modelSelectEl.value; const { modelURL, tokenizerURL, configURL, search_prefix } = getModelInfo(modelID); const text = searchInputEl.value; const query = search_prefix + searchInputEl.value; outputStatusEl.classList.remove("invisible"); outputStatusEl.innerText = "Calculating embeddings for query..."; isCalculating = true; const out = await getEmbeddings( bertWorker, modelURL, tokenizerURL, configURL, modelID, [query] ); outputStatusEl.classList.add("invisible"); const queryEmbeddings = out.output[0]; // calculate cosine similarity with all sentences given the query const distances = sentenceEmbeddings .map((embedding, id) => ({ id, similarity: 
cosineSimilarity(queryEmbeddings, embedding), })) .sort((a, b) => b.similarity - a.similarity) // getting top 10 most similar sentences .slice(0, 10); const colorScale = scaleLinear() .domain([ distances[distances.length - 1].similarity, distances[0].similarity, ]) .range([0, 1]) .interpolate(() => interpolateReds); outputAreaEl.querySelectorAll("span").forEach((el) => { el.style.color = "unset"; el.style.backgroundColor = "unset"; }); distances.forEach((d) => { const el = outputAreaEl.querySelector(`#sentence-${d.id}`); const color = colorScale(d.similarity); const fontColor = hcl(color).l < 70 ? "white" : "black"; el.style.color = fontColor; el.style.backgroundColor = color; }); outputAreaEl .querySelector(`#sentence-${distances[0].id}`) .scrollIntoView({ behavior: "smooth", block: "center", inline: "nearest", }); isCalculating = false; toggleInputs(false); }); async function calculateEmbeddings(text) { isCalculating = true; toggleInputs(true); const modelID = modelSelectEl.value; const { modelURL, tokenizerURL, configURL, document_prefix } = getModelInfo(modelID); const sentences = text.split(sentencesRegex); const allEmbeddings = []; outputStatusEl.classList.remove("invisible"); for (const [id, sentence] of sentences.entries()) { const query = document_prefix + sentence; outputStatusEl.innerText = `Calculating embeddings: sentence ${ id + 1 } of ${sentences.length}`; const embeddings = await getEmbeddings( bertWorker, modelURL, tokenizerURL, configURL, modelID, [query], updateStatus ); allEmbeddings.push(embeddings); } outputStatusEl.classList.add("invisible"); sentenceEmbeddings = allEmbeddings.map((e) => e.output[0]); isCalculating = false; toggleInputs(false); } function updateStatus(data) { if ("status" in data) { if (data.status === "loading") { outputStatusEl.innerText = data.message; outputStatusEl.classList.remove("invisible"); } } } function toggleInputs(state) { const interactive = document.querySelectorAll(".interactive"); interactive.forEach((el) => { if (state) { el.disabled = true; } else { el.disabled = false; } }); } searchWikiEl.addEventListener("input", () => { searchWikiEl.setCustomValidity(""); }); formWikiEl.addEventListener("submit", async (e) => { e.preventDefault(); if ("example" in e.submitter.dataset) { searchWikiEl.value = e.submitter.innerText; } const text = searchWikiEl.value; if (isCalculating || text === "") return; try { const wikiText = await getWikiText(text); searchWikiEl.setCustomValidity(""); textAreaEl.innerHTML = wikiText; populateOutputArea(wikiText); calculateEmbeddings(wikiText); searchWikiEl.value = ""; } catch { searchWikiEl.setCustomValidity("Invalid Wikipedia article name"); searchWikiEl.reportValidity(); } }); </script> </head> <body class="container max-w-4xl mx-auto p-4"> <main class="grid grid-cols-1 gap-5 relative"> <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span> <div> <h1 class="text-5xl font-bold">Candle BERT</h1> <h2 class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> Running sentence embeddings and similarity search in the browser using the Bert Model written with <a href="https://github.com/huggingface/candle/" target="_blank" class="underline hover:text-blue-500 hover:no-underline" >Candle </a> and compiled to Wasm. 
Embeddings models from are from <a href="https://huggingface.co/sentence-transformers/" target="_blank" class="underline hover:text-blue-500 hover:no-underline" > Sentence Transformers </a> and <a href="https://huggingface.co/intfloat/" target="_blank" class="underline hover:text-blue-500 hover:no-underline" > Liang Wang - e5 Models </a> </p> </div> <div> <label for="model" class="font-medium block">Models Options: </label> <select id="model" class="border-2 border-gray-500 rounded-md font-light interactive disabled:cursor-not-allowed w-full max-w-max" > <option value="intfloat_e5_small_v2" selected> intfloat/e5-small-v2 (133 MB) </option> <option value="intfloat_e5_base_v2"> intfloat/e5-base-v2 (438 MB) </option> <option value="intfloat_multilingual_e5_small"> intfloat/multilingual-e5-small (471 MB) </option> <option value="sentence_transformers_all_MiniLM_L6_v2"> sentence-transformers/all-MiniLM-L6-v2 (90.9 MB) </option> <option value="sentence_transformers_all_MiniLM_L12_v2"> sentence-transformers/all-MiniLM-L12-v2 (133 MB) </option> </select> </div> <div> <h3 class="font-medium">Examples:</h3> <form id="form-wiki" class="flex text-xs rounded-md justify-between w-min gap-3" > <input type="submit" hidden /> <button data-example class="disabled:cursor-not-allowed interactive"> Pizza </button> <button data-example class="disabled:cursor-not-allowed interactive"> Paris </button> <button data-example class="disabled:cursor-not-allowed interactive"> Physics </button> <input type="text" id="search-wiki" title="Search Wikipedia article by title" class="font-light py-0 mx-1 resize-none outline-none w-32 disabled:cursor-not-allowed interactive" placeholder="Load Wikipedia article..." /> <button title="Search Wikipedia article and load into input" class="bg-gray-700 hover:bg-gray-800 text-white font-normal px-2 py-1 rounded disabled:bg-gray-300 disabled:cursor-not-allowed interactive" > Load </button> </form> </div> <form id="form" class="flex text-normal px-1 py-1 border border-gray-700 rounded-md items-center" > <input type="submit" hidden /> <input type="text" id="search-input" class="font-light w-full px-3 py-2 mx-1 resize-none outline-none interactive disabled:cursor-not-allowed" placeholder="Search query here..." /> <button class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-2 w-16 rounded disabled:bg-gray-300 disabled:cursor-not-allowed interactive" > Search </button> </form> <div> <h3 class="font-medium">Input text:</h3> <div class="flex justify-between items-center"> <div class="rounded-md inline text-xs"> <span id="output-status" class="m-auto font-light invisible" >C</span > </div> </div> <div id="input-container" tabindex="0" class="min-h-[250px] bg-slate-100 text-gray-500 rounded-md p-4 flex flex-col gap-2 relative" > <textarea id="input-area" hidden value="" placeholder="Input text to perform semantic similarity search..." class="flex-1 resize-none outline-none left-0 right-0 top-0 bottom-0 m-4 absolute interactive disabled:invisible" ></textarea> <p id="output-area" class="grid-rows-2"> Input text to perform semantic similarity search... </p> </div> </div> </main> </body> </html>
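The ranking above depends on a `cosineSimilarity` helper imported from `utils.js`, which is not shown in this file. It presumably implements the standard cosine similarity, dot(a, b) / (‖a‖ ‖b‖); a sketch of that definition, written in Rust to match the rest of the repository and assuming plain `f32` embedding vectors:

```rust
// Standard cosine similarity between two embedding vectors of equal length.
fn cosine_similarity(a: &[f32], b: &[f32]) -> f32 {
    let dot: f32 = a.iter().zip(b.iter()).map(|(x, y)| x * y).sum();
    let norm_a = a.iter().map(|x| x * x).sum::<f32>().sqrt();
    let norm_b = b.iter().map(|x| x * x).sum::<f32>().sqrt();
    dot / (norm_a * norm_b)
}
```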
candle/candle-wasm-examples/bert/lib-example.html/0
{ "file_path": "candle/candle-wasm-examples/bert/lib-example.html", "repo_id": "candle", "token_count": 6066 }
51
<html> <head> <meta content="text/html;charset=utf-8" http-equiv="Content-Type" /> <title>Candle Llama.c Rust/WASM</title> </head> <body></body> </html> <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } code, output, select, pre { font-family: "Source Code Pro", monospace; } </style> <script src="https://cdn.tailwindcss.com"></script> <script type="module"> // base url for audio examples const MODELS_BASE_URL = "https://huggingface.co/karpathy/tinyllamas/resolve/main"; // models base url const MODELS = { stories15M: { url: "stories15M.bin", seq_len: 256, }, stories42M: { url: "stories42M.bin", seq_len: 1024, }, stories110M: { url: "stories110M.bin", seq_len: 1024, }, }; const llamaWorker = new Worker("./llama2cWorker.js", { type: "module", }); async function generateSequence(controller) { const getValue = (id) => document.querySelector(`#${id}`).value; const modelID = getValue("model"); const model = MODELS[modelID]; const weightsURL = `${MODELS_BASE_URL}/${model.url}`; const prompt = getValue("prompt"); const temperature = getValue("temperature"); const topP = getValue("top-p"); const repeatPenalty = getValue("repeat_penalty"); const seed = getValue("seed"); const maxSeqLen = getValue("max-seq"); function updateStatus(data) { const outStatus = document.querySelector("#output-status"); const outGen = document.querySelector("#output-generation"); const outCounter = document.querySelector("#output-counter"); switch (data.status) { case "loading": outStatus.hidden = false; outStatus.textContent = data.message; outGen.hidden = true; outCounter.hidden = true; break; case "generating": const { message, prompt, sentence, tokensSec, totalTime } = data; outStatus.hidden = true; outCounter.hidden = false; outGen.hidden = false; outGen.innerHTML = `<span class="font-semibold">${prompt}</span>${sentence.replace( /\<s\>|\<\/s\>/g, "" )}`; outCounter.innerHTML = `${(totalTime / 1000).toFixed( 2 )}s (${tokensSec.toFixed(2)} tok/s)`; break; case "complete": outStatus.hidden = true; outGen.hidden = false; break; } } return new Promise((resolve, reject) => { llamaWorker.postMessage({ weightsURL, modelID, tokenizerURL: "tokenizer.json", prompt, temp: temperature, top_p: topP, repeatPenalty, seed: BigInt(seed), maxSeqLen, command: "start", }); const handleAbort = () => { llamaWorker.postMessage({ command: "abort" }); }; const handleMessage = (event) => { const { status, error, message, prompt, sentence } = event.data; if (status) updateStatus(event.data); if (error) { llamaWorker.removeEventListener("message", handleMessage); reject(new Error(error)); } if (status === "aborted") { llamaWorker.removeEventListener("message", handleMessage); resolve(event.data); } if (status === "complete") { llamaWorker.removeEventListener("message", handleMessage); resolve(event.data); } }; controller.signal.addEventListener("abort", handleAbort); llamaWorker.addEventListener("message", handleMessage); }); } const form = document.querySelector("#form"); const prompt = document.querySelector("#prompt"); const clearBtn = document.querySelector("#clear-btn"); const runBtn = document.querySelector("#run"); const modelSelect = document.querySelector("#model"); let runController = new AbortController(); let isRunning = false; 
modelSelect.addEventListener("change", (e) => { const model = MODELS[e.target.value]; document.querySelector("#max-seq").max = model.seq_len; document.querySelector("#max-seq").nextElementSibling.value = model.seq_len; }); form.addEventListener("submit", async (e) => { e.preventDefault(); if (isRunning) { stopRunning(); } else { startRunning(); await generateSequence(runController); stopRunning(); } }); function startRunning() { isRunning = true; runBtn.textContent = "Stop"; } function stopRunning() { runController.abort(); runController = new AbortController(); runBtn.textContent = "Run"; isRunning = false; } clearBtn.addEventListener("click", (e) => { e.preventDefault(); prompt.value = ""; clearBtn.classList.add("invisible"); runBtn.disabled = true; stopRunning(); }); prompt.addEventListener("input", (e) => { runBtn.disabled = false; if (e.target.value.length > 0) { clearBtn.classList.remove("invisible"); } else { clearBtn.classList.add("invisible"); } }); </script> </head> <body class="container max-w-4xl mx-auto p-4 text-gray-800"> <main class="grid grid-cols-1 gap-8 relative"> <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span> <div> <h1 class="text-5xl font-bold">Candle Llama2.c</h1> <h2 class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> <a href="https://github.com/karpathy/llama2.c" target="_blank" class="underline hover:text-blue-500 hover:no-underline" target="_blank" >Llama2.c</a > is Andrey Karpathy's C implementation of the Llama 2 LLM model in C. This demo uses <a href="https://github.com/huggingface/candle/" target="_blank" class="underline hover:text-blue-500 hover:no-underline" >Candle </a> to run Llama2.c in the browser using rust/wasm. </p> </div> <div> <label for="model" class="font-medium">Models Options: </label> <select id="model" class="border-2 border-gray-500 rounded-md font-light"> <option value="stories15M" selected>stories 15M (60.8 MB)</option> <option value="stories42M">stories 42M (167 MB)</option> <option value="stories110M">stories 110M (438 MB)</option> </select> </div> <form id="form" class="flex text-normal px-1 py-1 border border-gray-700 rounded-md items-center"> <input type="submit" hidden /> <input type="text" id="prompt" class="font-light w-full px-3 py-2 mx-1 resize-none outline-none" placeholder="Add your prompt here..." 
value="Once upon a time" /> <button id="clear-btn"> <svg fill="none" xmlns="http://www.w3.org/2000/svg" width="40" viewBox="0 0 70 40"> <path opacity=".5" d="M39 .2v40.2" stroke="#1F2937" /> <path d="M1.5 11.5 19 29.1m0-17.6L1.5 29.1" opacity=".5" stroke="#1F2937" stroke-width="2" /> </svg> </button> <button id="run" class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-2 w-16 rounded disabled:bg-gray-300 disabled:cursor-not-allowed"> Run </button> </form> <details> <summary class="font-medium cursor-pointer">Advanced Options</summary> <div class="grid grid-cols-3 max-w-md items-center gap-3 py-3"> <label class="text-sm font-medium" for="max-seq" >Maximum length </label> <input type="range" id="max-seq" name="max-seq" min="1" max="256" step="1" value="200" oninput="this.nextElementSibling.value = Number(this.value)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 200</output > <label class="text-sm font-medium" for="temperature" >Temperature</label > <input type="range" id="temperature" name="temperature" min="0" max="2" step="0.01" value="0.40" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 0.40</output > <label class="text-sm font-medium" for="top-p">Top-p</label> <input type="range" id="top-p" name="top-p" min="0" max="1" step="0.01" value="1.00" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 1.00</output > <label class="text-sm font-medium" for="repeat_penalty" >Repeat Penalty</label > <input type="range" id="repeat_penalty" name="repeat_penalty" min="1" max="2" step="0.01" value="1.10" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md" >1.10</output > <label class="text-sm font-medium" for="seed">Seed</label> <input type="number" id="seed" name="seed" value="299792458" class="font-light border border-gray-700 text-right rounded-md p-2" /> <button id="run" onclick="document.querySelector('#seed').value = BigInt(Math.floor(Math.random() * 2**64-1))" class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-1 w-[50px] rounded disabled:bg-gray-300 disabled:cursor-not-allowed text-sm"> Rand </button> </div> </details> <div> <h3 class="font-medium">Generation:</h3> <div class="min-h-[250px] bg-slate-100 text-gray-500 p-4 rounded-md flex flex-col gap-2"> <div id="output-counter" hidden class="ml-auto font-semibold grid-rows-1 text-sm"></div> <p hidden id="output-generation" class="grid-rows-2"></p> <span id="output-status" class="m-auto font-light" >No output yet</span > </div> </div> </main> </body> </html>
candle/candle-wasm-examples/llama2-c/lib-example.html/0
{ "file_path": "candle/candle-wasm-examples/llama2-c/lib-example.html", "repo_id": "candle", "token_count": 6089 }
52
## Running T5 with Candle and WASM Here, we provide two examples of how to run T5 using a Candle-compiled WASM binary and runtime. ### Vanilla JS and WebWorkers To build and test the UI made in Vanilla JS and WebWorkers, first we need to build the WASM library: ```bash sh build-lib.sh ``` This will bundle the library under `./build` and we can import it inside our WebWorker like a normal JS module: ```js import init, { ModelConditionalGeneration, ModelEncoder } from "./build/m.js"; ``` For the quantized version, we need to import the quantized module: ```js import init, { ModelConditionalGeneration, ModelEncoder } from "./build/m-quantized.js"; ``` The full example can be found under `./index.html`. All needed assets are fetched from the web, so no need to download anything. Finally, you can preview the example by running a local HTTP server. For example: ```bash python -m http.server ``` Then open `http://localhost:8000/index.html` in your browser.
candle/candle-wasm-examples/t5/README.md/0
{ "file_path": "candle/candle-wasm-examples/t5/README.md", "repo_id": "candle", "token_count": 282 }
53
// Audio processing code, adapted from whisper.cpp // https://github.com/ggerganov/whisper.cpp use super::worker; pub trait Float: num_traits::Float + num_traits::FloatConst + num_traits::NumAssign {} impl Float for f32 {} impl Float for f64 {} // https://github.com/ggerganov/whisper.cpp/blob/4774d2feb01a772a15de81ffc34b34a1f294f020/whisper.cpp#L2357 fn fft<T: Float>(inp: &[T]) -> Vec<T> { let n = inp.len(); let zero = T::zero(); if n == 1 { return vec![inp[0], zero]; } if n % 2 == 1 { return dft(inp); } let mut out = vec![zero; n * 2]; let mut even = Vec::with_capacity(n / 2); let mut odd = Vec::with_capacity(n / 2); for (i, &inp) in inp.iter().enumerate() { if i % 2 == 0 { even.push(inp) } else { odd.push(inp); } } let even_fft = fft(&even); let odd_fft = fft(&odd); let two_pi = T::PI() + T::PI(); let n_t = T::from(n).unwrap(); for k in 0..n / 2 { let k_t = T::from(k).unwrap(); let theta = two_pi * k_t / n_t; let re = theta.cos(); let im = -theta.sin(); let re_odd = odd_fft[2 * k]; let im_odd = odd_fft[2 * k + 1]; out[2 * k] = even_fft[2 * k] + re * re_odd - im * im_odd; out[2 * k + 1] = even_fft[2 * k + 1] + re * im_odd + im * re_odd; out[2 * (k + n / 2)] = even_fft[2 * k] - re * re_odd + im * im_odd; out[2 * (k + n / 2) + 1] = even_fft[2 * k + 1] - re * im_odd - im * re_odd; } out } // https://github.com/ggerganov/whisper.cpp/blob/4774d2feb01a772a15de81ffc34b34a1f294f020/whisper.cpp#L2337 fn dft<T: Float>(inp: &[T]) -> Vec<T> { let zero = T::zero(); let n = inp.len(); let two_pi = T::PI() + T::PI(); let mut out = Vec::with_capacity(2 * n); let n_t = T::from(n).unwrap(); for k in 0..n { let k_t = T::from(k).unwrap(); let mut re = zero; let mut im = zero; for (j, &inp) in inp.iter().enumerate() { let j_t = T::from(j).unwrap(); let angle = two_pi * k_t * j_t / n_t; re += inp * angle.cos(); im -= inp * angle.sin(); } out.push(re); out.push(im); } out } #[allow(clippy::too_many_arguments)] // https://github.com/ggerganov/whisper.cpp/blob/4774d2feb01a772a15de81ffc34b34a1f294f020/whisper.cpp#L2414 fn log_mel_spectrogram_w<T: Float>( ith: usize, hann: &[T], samples: &[T], filters: &[T], fft_size: usize, fft_step: usize, speed_up: bool, n_len: usize, n_mel: usize, n_threads: usize, ) -> Vec<T> { let n_fft = if speed_up { 1 + fft_size / 4 } else { 1 + fft_size / 2 }; let zero = T::zero(); let half = T::from(0.5).unwrap(); let mut fft_in = vec![zero; fft_size]; let mut mel = vec![zero; n_len * n_mel]; for i in (ith..n_len).step_by(n_threads) { let offset = i * fft_step; // apply Hanning window for j in 0..fft_size { fft_in[j] = if offset + j < samples.len() { hann[j] * samples[offset + j] } else { zero } } // FFT -> mag^2 let mut fft_out: Vec<T> = fft(&fft_in); for j in 0..fft_size { fft_out[j] = fft_out[2 * j] * fft_out[2 * j] + fft_out[2 * j + 1] * fft_out[2 * j + 1]; } for j in 1..fft_size / 2 { let v = fft_out[fft_size - j]; fft_out[j] += v; } if speed_up { // scale down in the frequency domain results in a speed up in the time domain for j in 0..n_fft { fft_out[j] = half * (fft_out[2 * j] + fft_out[2 * j + 1]); } } // mel spectrogram for j in 0..n_mel { let mut sum = zero; for k in 0..n_fft { sum += fft_out[k] * filters[j * n_fft + k]; } mel[j * n_len + i] = T::max(sum, T::from(1e-10).unwrap()).log10(); } } mel } fn log_mel_spectrogram_<T: Float + std::fmt::Display>( samples: &[T], filters: &[T], fft_size: usize, fft_step: usize, n_mel: usize, speed_up: bool, ) -> Vec<T> { let zero = T::zero(); let two_pi = T::PI() + T::PI(); let half = T::from(0.5).unwrap(); let one = T::from(1.0).unwrap(); 
let four = T::from(4.0).unwrap(); let fft_size_t = T::from(fft_size).unwrap(); let hann: Vec<T> = (0..fft_size) .map(|i| half * (one - ((two_pi * T::from(i).unwrap()) / fft_size_t).cos())) .collect(); let n_len = samples.len() / fft_step; // pad audio with at least one extra chunk of zeros let pad = 100 * worker::m::CHUNK_LENGTH / 2; let n_len = if n_len % pad != 0 { (n_len / pad + 1) * pad } else { n_len }; let n_len = n_len + pad; let samples = { let mut samples_padded = samples.to_vec(); let to_add = n_len * fft_step - samples.len(); samples_padded.extend(std::iter::repeat(zero).take(to_add)); samples_padded }; // Use a single thread for now. let mut mel = log_mel_spectrogram_w( 0, &hann, &samples, filters, fft_size, fft_step, speed_up, n_len, n_mel, 1, ); let mmax = mel .iter() .max_by(|&u, &v| u.partial_cmp(v).unwrap_or(std::cmp::Ordering::Greater)) .copied() .unwrap_or(zero) - T::from(8).unwrap(); for m in mel.iter_mut() { let v = T::max(*m, mmax); *m = v / four + one } mel } pub fn pcm_to_mel<T: Float + std::fmt::Display>( cfg: &worker::m::Config, samples: &[T], filters: &[T], ) -> anyhow::Result<Vec<T>> { let mel = log_mel_spectrogram_( samples, filters, worker::m::N_FFT, worker::m::HOP_LENGTH, cfg.num_mel_bins, false, ); Ok(mel) }
candle/candle-wasm-examples/whisper/src/audio.rs/0
{ "file_path": "candle/candle-wasm-examples/whisper/src/audio.rs", "repo_id": "candle", "token_count": 3162 }
54
use yew_agent::PublicWorker; fn main() { console_error_panic_hook::set_once(); candle_wasm_example_yolo::Worker::register(); }
candle/candle-wasm-examples/yolo/src/bin/worker.rs/0
{ "file_path": "candle/candle-wasm-examples/yolo/src/bin/worker.rs", "repo_id": "candle", "token_count": 53 }
55
MONGODB_URL=mongodb://localhost:27017/
chat-ui/.env.ci/0
{ "file_path": "chat-ui/.env.ci", "repo_id": "chat-ui", "token_count": 16 }
56
# syntax=docker/dockerfile:1 # read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker # you will also find guides on how best to write your Dockerfile ARG INCLUDE_DB=false # stage that install the dependencies FROM node:20 AS builder-production WORKDIR /app COPY --link --chown=1000 package-lock.json package.json ./ RUN --mount=type=cache,target=/app/.npm \ npm set cache /app/.npm && \ npm ci --omit=dev FROM builder-production AS builder ARG APP_BASE= ARG PUBLIC_APP_COLOR=blue ENV BODY_SIZE_LIMIT=15728640 RUN --mount=type=cache,target=/app/.npm \ npm set cache /app/.npm && \ npm ci COPY --link --chown=1000 . . RUN npm run build # mongo image FROM mongo:latest AS mongo # image to be used if INCLUDE_DB is false FROM node:20-slim AS local_db_false # image to be used if INCLUDE_DB is true FROM node:20-slim AS local_db_true RUN apt-get update RUN apt-get install gnupg curl -y # copy mongo from the other stage COPY --from=mongo /usr/bin/mongo* /usr/bin/ ENV MONGODB_URL=mongodb://localhost:27017 RUN mkdir -p /data/db RUN chown -R 1000:1000 /data/db # final image FROM local_db_${INCLUDE_DB} AS final # build arg to determine if the database should be included ARG INCLUDE_DB=false ENV INCLUDE_DB=${INCLUDE_DB} # svelte requires APP_BASE at build time so it must be passed as a build arg ARG APP_BASE= # tailwind requires the primary theme to be known at build time so it must be passed as a build arg ARG PUBLIC_APP_COLOR=blue ENV BODY_SIZE_LIMIT=15728640 # install dotenv-cli RUN npm install -g dotenv-cli # switch to a user that works for spaces RUN userdel -r node RUN useradd -m -u 1000 user USER user ENV HOME=/home/user \ PATH=/home/user/.local/bin:$PATH WORKDIR /app # add a .env.local if the user doesn't bind a volume to it RUN touch /app/.env.local # get the default config, the entrypoint script and the server script COPY --chown=1000 package.json /app/package.json COPY --chown=1000 .env /app/.env COPY --chown=1000 entrypoint.sh /app/entrypoint.sh COPY --chown=1000 gcp-*.json /app/ #import the build & dependencies COPY --from=builder --chown=1000 /app/build /app/build COPY --from=builder --chown=1000 /app/node_modules /app/node_modules RUN npx playwright install USER root RUN npx playwright install-deps USER user RUN chmod +x /app/entrypoint.sh CMD ["/bin/bash", "-c", "/app/entrypoint.sh"]
chat-ui/Dockerfile/0
{ "file_path": "chat-ui/Dockerfile", "repo_id": "chat-ui", "token_count": 900 }
57
image: repository: ghcr.io/huggingface name: chat-ui tag: 0.0.0-latest pullPolicy: IfNotPresent replicas: 3 domain: huggingface.co service: type: NodePort annotations: { } serviceAccount: enabled: false create: false name: "" automountServiceAccountToken: true annotations: { } ingress: enabled: true path: "/" annotations: { } # className: "nginx" tls: { } # secretName: XXX resources: requests: cpu: 2 memory: 4Gi limits: cpu: 2 memory: 4Gi nodeSelector: {} tolerations: [] envVars: { } infisical: enabled: false env: "" project: "huggingchat-v2-a1" url: "" resyncInterval: 60 operatorSecretName: "huggingchat-operator-secrets" operatorSecretNamespace: "hub-utils" # Allow to environment injections on top or instead of infisical extraEnvFrom: [] extraEnv: [] autoscaling: enabled: false minReplicas: 1 maxReplicas: 2 targetMemoryUtilizationPercentage: "" targetCPUUtilizationPercentage: "" monitoring: enabled: false
chat-ui/chart/values.yaml/0
{ "file_path": "chat-ui/chart/values.yaml", "repo_id": "chat-ui", "token_count": 376 }
58
# Text Generation Inference (TGI) | Feature | Available | | --------------------------- | --------- | | [Tools](../tools) | Yes\* | | [Multimodal](../multimodal) | Yes\* | \* Tools are only supported with the Cohere Command R+ model with the Xenova tokenizers. Please see the [Tools](../tools) section. \* Multimodal is only supported with the IDEFICS model. Please see the [Multimodal](../multimodal) section. By default, if `endpoints` are left unspecified, Chat UI will look for the model on the hosted Hugging Face inference API using the model name, and use your `HF_TOKEN`. Refer to the [overview](/configuration/models/overview) for more information about model configuration. ```ini MODELS=`[ { "name": "mistralai/Mistral-7B-Instruct-v0.2", "displayName": "mistralai/Mistral-7B-Instruct-v0.2", "description": "Mistral 7B is a new Apache 2.0 model, released by Mistral AI that outperforms Llama2 13B in benchmarks.", "websiteUrl": "https://mistral.ai/news/announcing-mistral-7b/", "preprompt": "", "chatPromptTemplate" : "<s>{{#each messages}}{{#ifUser}}[INST] {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}}{{content}} [/INST]{{/ifUser}}{{#ifAssistant}}{{content}}</s>{{/ifAssistant}}{{/each}}", "parameters": { "temperature": 0.3, "top_p": 0.95, "repetition_penalty": 1.2, "top_k": 50, "truncate": 3072, "max_new_tokens": 1024, "stop": ["</s>"] }, "promptExamples": [ { "title": "Write an email from bullet list", "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)" }, { "title": "Code a snake game", "prompt": "Code a basic snake game in python, give explanations for each step." }, { "title": "Assist in a task", "prompt": "How do I make a delicious lemon cheesecake?" } ] } ]` ``` ## Running your own models using a custom endpoint If you want to, instead of hitting models on the Hugging Face Inference API, you can run your own models locally. A good option is to hit a [text-generation-inference](https://github.com/huggingface/text-generation-inference) endpoint. This is what is done in the official [Chat UI Spaces Docker template](https://huggingface.co/new-space?template=huggingchat/chat-ui-template) for instance: both this app and a text-generation-inference server run inside the same container. To do this, you can add your own endpoints to the `MODELS` variable in `.env.local`, by adding an `"endpoints"` key for each model in `MODELS`. ```ini MODELS=`[{ "name": "your-model-name", "displayName": "Your Model Name", ... other model config "endpoints": [{ "type" : "tgi", "url": "https://HOST:PORT", }] }] ```
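Before pointing Chat UI at a locally running TGI server, it can help to sanity-check the endpoint directly. The sketch below assumes a server listening on `http://127.0.0.1:8080` and uses TGI's `/generate` route; adjust the URL and parameters to your deployment.

```js
// Illustrative request against a local text-generation-inference server.
const response = await fetch("http://127.0.0.1:8080/generate", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    inputs: "What is Deep Learning?",
    parameters: { max_new_tokens: 50, temperature: 0.3 },
  }),
});
console.log(await response.json()); // expect a payload like { "generated_text": "..." }
```

If this returns generated text, the same URL can be used as the `"url"` value in the `endpoints` entry above.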
chat-ui/docs/source/configuration/models/providers/tgi.md/0
{ "file_path": "chat-ui/docs/source/configuration/models/providers/tgi.md", "repo_id": "chat-ui", "token_count": 1067 }
59
export default { plugins: { tailwindcss: {}, autoprefixer: {}, }, };
chat-ui/postcss.config.js/0
{ "file_path": "chat-ui/postcss.config.js", "repo_id": "chat-ui", "token_count": 34 }
60
<script lang="ts"> import { onDestroy } from "svelte"; import IconCopy from "./icons/IconCopy.svelte"; import Tooltip from "./Tooltip.svelte"; export let classNames = ""; export let value: string; let isSuccess = false; let timeout: ReturnType<typeof setTimeout>; const handleClick = async () => { // writeText() can be unavailable or fail in some cases (iframe, etc) so we try/catch try { await navigator.clipboard.writeText(value); isSuccess = true; if (timeout) { clearTimeout(timeout); } timeout = setTimeout(() => { isSuccess = false; }, 1000); } catch (err) { console.error(err); } }; onDestroy(() => { if (timeout) { clearTimeout(timeout); } }); </script> <button class={classNames} title={"Copy to clipboard"} type="button" on:click on:click={handleClick} > <div class="relative"> <slot> <IconCopy classNames="h-[1.14em] w-[1.14em]" /> </slot> <Tooltip classNames={isSuccess ? "opacity-100" : "opacity-0"} /> </div> </button>
chat-ui/src/lib/components/CopyToClipBoardBtn.svelte/0
{ "file_path": "chat-ui/src/lib/components/CopyToClipBoardBtn.svelte", "repo_id": "chat-ui", "token_count": 394 }
61
<script lang="ts"> import CarbonStopFilledAlt from "~icons/carbon/stop-filled-alt"; export let classNames = ""; </script> <button type="button" on:click class="btn flex h-8 rounded-lg border bg-white px-3 py-1 shadow-sm transition-all hover:bg-gray-100 dark:border-gray-600 dark:bg-gray-700 dark:hover:bg-gray-600 {classNames}" > <CarbonStopFilledAlt class="-ml-1 mr-1 h-[1.25rem] w-[1.1875rem] text-gray-300" /> Stop generating </button>
chat-ui/src/lib/components/StopGeneratingBtn.svelte/0
{ "file_path": "chat-ui/src/lib/components/StopGeneratingBtn.svelte", "repo_id": "chat-ui", "token_count": 170 }
62
<script lang="ts"> import CarbonImage from "~icons/carbon/image"; // import EosIconsLoading from "~icons/eos-icons/loading"; export let files: File[]; export let mimeTypes: string[] = []; export let onDrag = false; export let onDragInner = false; async function dropHandle(event: DragEvent) { event.preventDefault(); if (event.dataTransfer && event.dataTransfer.items) { // Use DataTransferItemList interface to access the file(s) if (files.length > 0) { files = []; } if (event.dataTransfer.items[0].kind === "file") { for (let i = 0; i < event.dataTransfer.items.length; i++) { const file = event.dataTransfer.items[i].getAsFile(); if (file) { // check if the file matches the mimeTypes // else abort if ( !mimeTypes.some((mimeType: string) => { const [type, subtype] = mimeType.split("/"); const [fileType, fileSubtype] = file.type.split("/"); return type === fileType && (subtype === "*" || fileSubtype === subtype); }) ) { setErrorMsg(`Some file type not supported. Only allowed: ${mimeTypes.join(", ")}`); files = []; return; } // if file is bigger than 10MB abort if (file.size > 10 * 1024 * 1024) { setErrorMsg("Some file is too big. (10MB max)"); files = []; return; } // add the file to the files array files = [...files, file]; } } onDrag = false; } } } function setErrorMsg(errorMsg: string) { onDrag = false; alert(errorMsg); } </script> <div id="dropzone" role="form" on:drop={dropHandle} on:dragenter={() => (onDragInner = true)} on:dragleave={() => (onDragInner = false)} on:dragover|preventDefault class="relative flex h-28 w-full max-w-4xl flex-col items-center justify-center gap-1 rounded-xl border-2 border-dotted {onDragInner ? 'border-blue-200 !bg-blue-500/10 text-blue-600 *:pointer-events-none dark:border-blue-600 dark:bg-blue-500/20 dark:text-blue-500' : 'bg-gray-100 text-gray-500 dark:border-gray-500 dark:bg-gray-700 dark:text-gray-400'}" > <CarbonImage class="text-xl" /> <p>Drop File to add to chat</p> </div>
chat-ui/src/lib/components/chat/FileDropzone.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/FileDropzone.svelte", "repo_id": "chat-ui", "token_count": 906 }
63
import { collections } from "$lib/server/database"; import { ObjectId } from "mongodb"; /** * Returns the lock id if the lock was acquired, false otherwise */ export async function acquireLock(key: string): Promise<ObjectId | false> { try { const id = new ObjectId(); const insert = await collections.semaphores.insertOne({ _id: id, key, createdAt: new Date(), updatedAt: new Date(), }); return insert.acknowledged ? id : false; // true if the document was inserted } catch (e) { // unique index violation, so there must already be a lock return false; } } export async function releaseLock(key: string, lockId: ObjectId) { await collections.semaphores.deleteOne({ _id: lockId, key, }); } export async function isDBLocked(key: string): Promise<boolean> { const res = await collections.semaphores.countDocuments({ key, }); return res > 0; } export async function refreshLock(key: string, lockId: ObjectId): Promise<boolean> { const result = await collections.semaphores.updateOne( { _id: lockId, key, }, { $set: { updatedAt: new Date(), }, } ); return result.matchedCount > 0; }
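// --- Illustrative usage sketch (not part of the original file) ---
// Shows how acquireLock / refreshLock / releaseLock are meant to be combined.
// The key name and refresh interval below are assumptions chosen for the example.
export async function withLockExample(key: string, work: () => Promise<void>) {
	const lockId = await acquireLock(key);
	if (lockId === false) {
		return; // another process already holds the lock
	}
	// keep the semaphore's updatedAt fresh while the work runs
	const interval = setInterval(() => refreshLock(key, lockId), 10_000);
	try {
		await work();
	} finally {
		clearInterval(interval);
		await releaseLock(key, lockId);
	}
}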
chat-ui/src/lib/migrations/lock.ts/0
{ "file_path": "chat-ui/src/lib/migrations/lock.ts", "repo_id": "chat-ui", "token_count": 400 }
64
import { z } from "zod"; import type { EmbeddingEndpoint, Embedding } from "../embeddingEndpoints"; import { chunk } from "$lib/utils/chunk"; import { env } from "$env/dynamic/private"; export const embeddingEndpointOpenAIParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), type: z.literal("openai"), url: z.string().url().default("https://api.openai.com/v1/embeddings"), apiKey: z.string().default(env.OPENAI_API_KEY), defaultHeaders: z.record(z.string()).default({}), }); export async function embeddingEndpointOpenAI( input: z.input<typeof embeddingEndpointOpenAIParametersSchema> ): Promise<EmbeddingEndpoint> { const { url, model, apiKey, defaultHeaders } = embeddingEndpointOpenAIParametersSchema.parse(input); const maxBatchSize = model.maxBatchSize || 100; return async ({ inputs }) => { const requestURL = new URL(url); const batchesInputs = chunk(inputs, maxBatchSize); const batchesResults = await Promise.all( batchesInputs.map(async (batchInputs) => { const response = await fetch(requestURL, { method: "POST", headers: { Accept: "application/json", "Content-Type": "application/json", ...(apiKey ? { Authorization: `Bearer ${apiKey}` } : {}), ...defaultHeaders, }, body: JSON.stringify({ input: batchInputs, model: model.name }), }); const embeddings: Embedding[] = []; const responseObject = await response.json(); for (const embeddingObject of responseObject.data) { embeddings.push(embeddingObject.embedding); } return embeddings; }) ); const flatAllEmbeddings = batchesResults.flat(); return flatAllEmbeddings; }; }
chat-ui/src/lib/server/embeddingEndpoints/openai/embeddingEndpoints.ts/0
{ "file_path": "chat-ui/src/lib/server/embeddingEndpoints/openai/embeddingEndpoints.ts", "repo_id": "chat-ui", "token_count": 620 }
65
import { env } from "$env/dynamic/private"; import { buildPrompt } from "$lib/buildPrompt"; import type { TextGenerationStreamOutput } from "@huggingface/inference"; import type { Endpoint } from "../endpoints"; import { z } from "zod"; import { logger } from "$lib/server/logger"; export const endpointLlamacppParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), type: z.literal("llamacpp"), url: z.string().url().default("http://127.0.0.1:8080"), // legacy, feel free to remove in breaking change update baseURL: z.string().url().optional(), accessToken: z .string() .min(1) .default(env.HF_TOKEN ?? env.HF_ACCESS_TOKEN), }); export function endpointLlamacpp( input: z.input<typeof endpointLlamacppParametersSchema> ): Endpoint { const { baseURL, url, model } = endpointLlamacppParametersSchema.parse(input); return async ({ messages, preprompt, continueMessage, generateSettings }) => { const prompt = await buildPrompt({ messages, continueMessage, preprompt, model, }); const parameters = { ...model.parameters, ...generateSettings }; const r = await fetch(`${baseURL ?? url}/completion`, { method: "POST", headers: { "Content-Type": "application/json", }, body: JSON.stringify({ prompt, stream: true, temperature: parameters.temperature, top_p: parameters.top_p, top_k: parameters.top_k, stop: parameters.stop, repeat_penalty: parameters.repetition_penalty, n_predict: parameters.max_new_tokens, cache_prompt: true, }), }); if (!r.ok) { throw new Error(`Failed to generate text: ${await r.text()}`); } const encoder = new TextDecoderStream(); const reader = r.body?.pipeThrough(encoder).getReader(); return (async function* () { let stop = false; let generatedText = ""; let tokenId = 0; let accumulatedData = ""; // Buffer to accumulate data chunks while (!stop) { // Read the stream and log the outputs to console const out = (await reader?.read()) ?? { done: false, value: undefined }; // If it's done, we cancel if (out.done) { reader?.cancel(); return; } if (!out.value) { return; } // Accumulate the data chunk accumulatedData += out.value; // Process each complete JSON object in the accumulated data while (accumulatedData.includes("\n")) { // Assuming each JSON object ends with a newline const endIndex = accumulatedData.indexOf("\n"); let jsonString = accumulatedData.substring(0, endIndex).trim(); // Remove the processed part from the buffer accumulatedData = accumulatedData.substring(endIndex + 1); if (jsonString.startsWith("data: ")) { jsonString = jsonString.slice(6); let data = null; try { data = JSON.parse(jsonString); } catch (e) { logger.error(e, "Failed to parse JSON"); logger.error(jsonString, "Problematic JSON string:"); continue; // Skip this iteration and try the next chunk } // Handle the parsed data if (data.content || data.stop) { generatedText += data.content; const output: TextGenerationStreamOutput = { token: { id: tokenId++, text: data.content ?? "", logprob: 0, special: false, }, generated_text: data.stop ? generatedText : null, details: null, }; if (data.stop) { stop = true; output.token.special = true; reader?.cancel(); } yield output; } } } } })(); }; } export default endpointLlamacpp;
chat-ui/src/lib/server/endpoints/llamacpp/endpointLlamacpp.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/llamacpp/endpointLlamacpp.ts", "repo_id": "chat-ui", "token_count": 1462 }
66
import { dot } from "@huggingface/transformers"; import type { EmbeddingBackendModel } from "$lib/server/embeddingModels"; import type { Embedding } from "$lib/server/embeddingEndpoints/embeddingEndpoints"; // see here: https://github.com/nmslib/hnswlib/blob/359b2ba87358224963986f709e593d799064ace6/README.md?plain=1#L34 export function innerProduct(embeddingA: Embedding, embeddingB: Embedding) { return 1.0 - dot(embeddingA, embeddingB); } export async function getSentenceSimilarity( embeddingModel: EmbeddingBackendModel, query: string, sentences: string[] ): Promise<{ distance: number; embedding: Embedding; idx: number }[]> { const inputs = [ `${embeddingModel.preQuery}${query}`, ...sentences.map((sentence) => `${embeddingModel.prePassage}${sentence}`), ]; const embeddingEndpoint = await embeddingModel.getEndpoint(); const output = await embeddingEndpoint({ inputs }).catch((err) => { throw Error("Failed to generate embeddings for sentence similarity", { cause: err }); }); const queryEmbedding: Embedding = output[0]; const sentencesEmbeddings: Embedding[] = output.slice(1); return sentencesEmbeddings.map((sentenceEmbedding, idx) => ({ distance: innerProduct(queryEmbedding, sentenceEmbedding), embedding: sentenceEmbedding, idx, })); }
chat-ui/src/lib/server/sentenceSimilarity.ts/0
{ "file_path": "chat-ui/src/lib/server/sentenceSimilarity.ts", "repo_id": "chat-ui", "token_count": 433 }
67