# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Llava-NeXT model.""" import gc import unittest import requests from huggingface_hub import hf_hub_download from transformers import ( AutoProcessor, LlavaNextConfig, LlavaNextForConditionalGeneration, is_torch_available, is_vision_available, ) from transformers.testing_utils import ( require_bitsandbytes, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, ) if is_torch_available(): import torch from transformers.models.llava_next.modeling_llava_next import image_size_to_num_patches else: is_torch_greater_or_equal_than_2_0 = False if is_vision_available(): from PIL import Image class LlavaNextVisionText2TextModelTester: def __init__( self, parent, ignore_index=-100, image_token_index=0, projector_hidden_act="gelu", seq_length=7, vision_feature_select_strategy="default", vision_feature_layer=-1, text_config={ "model_type": "llama", "seq_length": 7, "is_training": True, "use_input_mask": True, "use_token_type_ids": False, "use_labels": True, "vocab_size": 99, "hidden_size": 32, "num_hidden_layers": 2, "num_attention_heads": 4, "intermediate_size": 37, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "attention_probs_dropout_prob": 0.1, "max_position_embeddings": 580, "type_vocab_size": 16, "type_sequence_label_size": 2, "initializer_range": 0.02, "num_labels": 3, "num_choices": 4, "pad_token_id": 0, }, is_training=True, vision_config={ "image_size": 16, "patch_size": 2, "num_channels": 3, "is_training": True, "hidden_size": 32, "projection_dim": 32, "num_hidden_layers": 2, "num_attention_heads": 4, "intermediate_size": 37, "dropout": 0.1, "attention_dropout": 0.1, "initializer_range": 0.02, }, ): self.parent = parent self.ignore_index = ignore_index self.image_token_index = image_token_index self.projector_hidden_act = projector_hidden_act self.vision_feature_select_strategy = vision_feature_select_strategy self.vision_feature_layer = vision_feature_layer self.text_config = text_config self.vision_config = vision_config self.seq_length = seq_length self.num_hidden_layers = text_config["num_hidden_layers"] self.vocab_size = text_config["vocab_size"] self.hidden_size = text_config["hidden_size"] self.num_attention_heads = text_config["num_attention_heads"] self.is_training = is_training self.batch_size = 3 self.num_channels = 3 self.image_size = 30 self.encoder_seq_length = 342 self.image_grid_pinpoints = [[32, 32]] def get_config(self): return LlavaNextConfig( text_config=self.text_config, vision_config=self.vision_config, ignore_index=self.ignore_index, image_token_index=self.image_token_index, projector_hidden_act=self.projector_hidden_act, vision_feature_select_strategy=self.vision_feature_select_strategy, 
vision_feature_layer=self.vision_feature_layer, image_grid_pinpoints=self.image_grid_pinpoints, ) def prepare_config_and_inputs(self): pixel_values = floats_tensor( [ self.batch_size, 5, self.vision_config["num_channels"], self.vision_config["image_size"], self.vision_config["image_size"], ] ) config = self.get_config() return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 2 attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device) # we are giving 3 images let's make sure we pass in 3 image tokens input_ids[:, 1] = config.image_token_index labels = torch.zeros((self.batch_size, self.seq_length), dtype=torch.long, device=torch_device) # maskout where the image token is labels[:, 1] == self.ignore_index inputs_dict = { "pixel_values": pixel_values, "image_sizes": torch.tensor( [[self.vision_config["image_size"], self.vision_config["image_size"]]] * self.batch_size ), "input_ids": input_ids, "attention_mask": attention_mask, "labels": labels, } return config, inputs_dict def create_and_check_llava_next_model_fp16_forward( self, config, input_ids, pixel_values, attention_mask, image_sizes ): model = LlavaNextForConditionalGeneration(config=config) model.to(torch_device) model.half() model.eval() logits = model( input_ids=input_ids, attention_mask=attention_mask, image_sizes=image_sizes, pixel_values=pixel_values.to(torch.bfloat16), return_dict=True, )["logits"] self.parent.assertFalse(torch.isnan(logits).any().item()) def create_and_check_llava_next_model_fp16_autocast_forward( self, config, input_ids, pixel_values, attention_mask, image_sizes ): config.torch_dtype = torch.float16 model = LlavaNextForConditionalGeneration(config=config) model.to(torch_device) model.eval() with torch.autocast(device_type="cuda", dtype=torch.float16): logits = model( input_ids=input_ids, attention_mask=attention_mask, image_sizes=image_sizes, pixel_values=pixel_values.to(torch.bfloat16), return_dict=True, )["logits"] self.parent.assertFalse(torch.isnan(logits).any().item()) @require_torch class LlavaNextForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): """ Model tester for `LlavaNextForConditionalGeneration`. 
""" all_model_classes = (LlavaNextForConditionalGeneration,) if is_torch_available() else () test_pruning = False test_head_masking = False def setUp(self): self.model_tester = LlavaNextVisionText2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=LlavaNextConfig, has_text_modality=False) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if "image_newline" in name: continue elif param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) input_ids = inputs["input_ids"] del inputs["input_ids"] del inputs["pixel_values"] wte = model.get_input_embeddings() inputs["inputs_embeds"] = wte(input_ids) with torch.no_grad(): model(**inputs) # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs # while some other models require pixel_values to be present def test_inputs_embeds_matches_input_ids(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) input_ids = inputs["input_ids"] del inputs["input_ids"] del inputs["pixel_values"] inputs_embeds = model.get_input_embeddings()(input_ids) with torch.no_grad(): out_ids = model(input_ids=input_ids, **inputs)[0] out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0] self.assertTrue(torch.allclose(out_embeds, out_ids)) @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Feedforward chunking is not yet supported") def test_feed_forward_chunking(self): pass @unittest.skip(reason="CPU offload is not yet supported") def test_cpu_offload(self): pass @unittest.skip(reason="Compile not yet supported because in LLava models") def test_sdpa_can_compile_dynamic(self): pass @unittest.skip(reason="Compile not yet supported because in LLava models") def test_sdpa_can_dispatch_on_flash(self): pass @require_torch class LlavaNextForConditionalGenerationIntegrationTest(unittest.TestCase): def setUp(self): self.processor = AutoProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf") url = 
"https://github.com/haotian-liu/LLaVA/blob/1a91fc274d7c35a9b50b3cb29c4247ae5837ce39/images/llava_v1_5_radar.jpg?raw=true" self.image = Image.open(requests.get(url, stream=True).raw) self.prompt = "[INST] <image>\nWhat is shown in this image? [/INST]" def tearDown(self): gc.collect() torch.cuda.empty_cache() @slow @require_bitsandbytes def test_small_model_integration_test(self): model = LlavaNextForConditionalGeneration.from_pretrained( "llava-hf/llava-v1.6-mistral-7b-hf", load_in_4bit=True, ) inputs = self.processor(self.prompt, self.image, return_tensors="pt") # verify inputs against original implementation filepath = hf_hub_download( repo_id="nielsr/test-image", filename="llava_1_6_input_ids.pt", repo_type="dataset", ) original_input_ids = torch.load(filepath, map_location="cpu") # replace -200 by image_token_index (since we use token ID = 32000 for the image token) original_input_ids[original_input_ids == -200] = model.config.image_token_index assert original_input_ids[0].tolist() == inputs.input_ids[0].tolist() filepath = hf_hub_download( repo_id="nielsr/test-image", filename="llava_1_6_pixel_values.pt", repo_type="dataset", ) original_pixel_values = torch.load(filepath, map_location="cpu") assert torch.allclose(original_pixel_values, inputs.pixel_values.half()) # verify single forward pass inputs = inputs.to(torch_device) with torch.no_grad(): output = model(**inputs) expected_slice = torch.tensor( [ [-4.7695, -4.5664, -0.2786], [-10.6250, -10.8906, -2.5254], [-6.7383, -7.2461, -0.6787], ], dtype=torch.float32, device=torch_device, ) assert torch.allclose(output.logits[0, :3, :3], expected_slice, atol=1e-3) # verify generation output = model.generate(**inputs, max_new_tokens=100) EXPECTED_DECODED_TEXT = '[INST] \nWhat is shown in this image? [/INST] The image appears to be a radar chart, which is a type of multi-dimensional plot that displays values for multiple quantitative variables represented on axes starting from the same point. This particular radar chart is showing the performance of various models or systems across different metrics or datasets.\n\nThe chart is divided into several sections, each representing a different model or dataset. The axes represent different metrics or datasets, such as "MMM-Vet," "MMM-Bench," "L' # fmt: skip self.assertEqual( self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_bitsandbytes def test_small_model_integration_test_batch(self): model = LlavaNextForConditionalGeneration.from_pretrained( "llava-hf/llava-v1.6-mistral-7b-hf", load_in_4bit=True ) url = "http://images.cocodataset.org/val2017/000000039769.jpg" cats_image = Image.open(requests.get(url, stream=True).raw) inputs = self.processor( [self.prompt, self.prompt], images=[self.image, cats_image], return_tensors="pt", padding=True, ).to(torch_device) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=20) EXPECTED_DECODED_TEXT = ['[INST] \nWhat is shown in this image? [/INST] The image appears to be a radar chart, which is a type of multi-dimensional plot that displays', '[INST] \nWhat is shown in this image? 
[/INST] The image shows two cats lying on a pink surface, which appears to be a couch or a cush'] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_bitsandbytes def test_small_model_integration_test_unk_token(self): # related to (#29835) model = LlavaNextForConditionalGeneration.from_pretrained( "llava-hf/llava-v1.6-mistral-7b-hf", load_in_4bit=True, ) prompt_with_unk = "[INST] <image>\nWhat is shown in this <unk> image? [/INST]" inputs = self.processor(prompt_with_unk, self.image, return_tensors="pt") # verify single forward pass inputs = inputs.to(torch_device) with torch.no_grad(): output = model(**inputs) # verify generation output = model.generate(**inputs, max_new_tokens=40) EXPECTED_DECODED_TEXT = '[INST] \nWhat is shown in this image? [/INST] The image appears to be a radar chart, which is a type of multi-dimensional plot that displays values for multiple quantitative variables represented on axes starting from the same point. This particular radar chart' # fmt: skip self.assertEqual( self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_bitsandbytes def test_small_model_integration_test_batch_different_resolutions(self): model = LlavaNextForConditionalGeneration.from_pretrained( "llava-hf/llava-v1.6-mistral-7b-hf", load_in_4bit=True, ) url = "http://images.cocodataset.org/val2017/000000039769.jpg" lowres_url = "https://4.img-dpreview.com/files/p/TS560x560~forums/56876524/03975b28741443319e9a94615e35667e" cats_image = Image.open(requests.get(url, stream=True).raw) lowres_img = Image.open(requests.get(lowres_url, stream=True).raw) inputs = self.processor( [self.prompt, self.prompt], images=[lowres_img, cats_image], return_tensors="pt", padding=True ).to(torch_device) pixel_values = inputs["pixel_values"] # verify pixel values are padded correctly with 0 when one image has more num_patches than the other image_num_patches = [ image_size_to_num_patches( image_size=imsize, grid_pinpoints=model.config.image_grid_pinpoints, patch_size=model.config.vision_config.image_size, ) for imsize in inputs["image_sizes"] ] for pix_val, num_patch in zip(pixel_values, image_num_patches): self.assertTrue(torch.all(pix_val[num_patch:] == 0)) # pad on the right for i in range(num_patch): self.assertFalse(torch.all(pix_val[i : i + 1] == 0)) # no padding expected in any of patches # check loss when labels are passed inputs["labels"] = inputs["input_ids"].clone() with torch.no_grad(): output = model(**inputs) expected_slice = torch.tensor( [[-0.0308, -0.0313, -0.0314], [-0.3064, -0.3013, -0.2986], [-0.1226, -0.1246, -0.1210]], dtype=torch.float32, device=torch_device, ) assert torch.allclose(output.logits[0, -3:, -3:], expected_slice, atol=1e-3) assert torch.allclose(output.loss, torch.tensor(6.8619, device=torch_device)) # verify generation output = model.generate(**inputs, max_new_tokens=50) EXPECTED_DECODED_TEXT = '[INST] \nWhat is shown in this image? [/INST] The image shows a forested area with a misty or foggy atmosphere. In the foreground, there is a grassy field with a few deer grazing. 
The deer are partially obscured by the fog, and the trees in the background' # fmt: skip self.assertEqual( self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_bitsandbytes def test_small_model_integration_test_batch_matches_single(self): model = LlavaNextForConditionalGeneration.from_pretrained( "llava-hf/llava-v1.6-mistral-7b-hf", load_in_4bit=True, ) url = "http://images.cocodataset.org/val2017/000000039769.jpg" lowres_url = "https://4.img-dpreview.com/files/p/TS560x560~forums/56876524/03975b28741443319e9a94615e35667e" cats_image = Image.open(requests.get(url, stream=True).raw) lowres_img = Image.open(requests.get(lowres_url, stream=True).raw) inputs_batched = self.processor( [self.prompt, self.prompt], images=[lowres_img, cats_image], return_tensors="pt", padding=True ).to(torch_device) inputs_single = self.processor(self.prompt, images=lowres_img, return_tensors="pt", padding=True).to( torch_device ) # verify generation output_batched = model.generate(**inputs_batched, max_new_tokens=50) output_single = model.generate(**inputs_single, max_new_tokens=50) self.assertEqual( self.processor.decode(output_batched[0], skip_special_tokens=True), self.processor.decode(output_single[0], skip_special_tokens=True), ) @slow @require_bitsandbytes def test_padding_side_when_merging_inputs(self): model = LlavaNextForConditionalGeneration.from_pretrained( "llava-hf/llava-v1.6-mistral-7b-hf", load_in_4bit=True, ) url = "http://images.cocodataset.org/val2017/000000039769.jpg" lowres_url = "https://4.img-dpreview.com/files/p/TS560x560~forums/56876524/03975b28741443319e9a94615e35667e" cats_image = Image.open(requests.get(url, stream=True).raw) lowres_img = Image.open(requests.get(lowres_url, stream=True).raw) inputs_batched = self.processor( [self.prompt, self.prompt], images=[lowres_img, cats_image], return_tensors="pt", padding=True ).to(torch_device) # model is in eval mode by default so we should get pad on the left side # we can check the first hidden-states (aka inputs embeds) # the first element was lo-res image and we expect the first 1414 tokens to be all pads output_eval = model(**inputs_batched, output_hidden_states=True) self.assertTrue((output_eval.hidden_states[0][0, :1414, ...] == 0).all().item()) # otherwise padding is on the right side, so it's last 1414 tokens self.processor.padding_side = "right" inputs_batched = self.processor( [self.prompt, self.prompt], images=[lowres_img, cats_image], return_tensors="pt", padding=True ).to(torch_device) model.train() with torch.no_grad(): output_train = model(**inputs_batched, output_hidden_states=True) self.assertTrue((output_train.hidden_states[0][0, -1414:, ...] == 0).all().item()) with self.assertLogs("transformers", level="WARNING") as logs: model.padding_side = "left" model.train() model(**inputs_batched, output_hidden_states=True) self.assertIn( "Padding side is set to 'left' but the model is in training mode. For training", logs.output[0] ) with self.assertLogs("transformers", level="WARNING") as logs: model.padding_side = "right" model.eval() model(**inputs_batched, output_hidden_states=True) self.assertIn( "Padding side is set to 'right' but the model is in inference mode. 
For correct", logs.output[0] ) @slow @require_bitsandbytes def test_expansion_in_processing(self): model_id = "llava-hf/llava-v1.6-mistral-7b-hf" model = LlavaNextForConditionalGeneration.from_pretrained(model_id, load_in_4bit=True) processor = AutoProcessor.from_pretrained(model_id) prompt = "USER: <image>\nDescribe the image:\nASSISTANT:" image_file = "http://images.cocodataset.org/val2017/000000039769.jpg" raw_image = Image.open(requests.get(image_file, stream=True).raw) # check processing with expansion of inputs processor.vision_feature_select_strategy = "default" processor.patch_size = 14 inputs_expanded = processor(prompt, raw_image, return_tensors="pt").to(torch_device, torch.float16) self.assertTrue(inputs_expanded.input_ids.shape[-1] == 2356) # check processing without expansion of inputs (legacy behavior) processor.vision_feature_select_strategy = None processor.patch_size = None inputs = processor(prompt, raw_image, return_tensors="pt").to(torch_device, torch.float16) self.assertTrue(inputs.input_ids.shape[-1] == 17) # generate exactly 20 tokens output = model.generate(**inputs, min_new_tokens=20, max_new_tokens=20) output_expanded = model.generate(**inputs_expanded, min_new_tokens=20, max_new_tokens=20) # check that both inputs are handled correctly and generate the same output self.assertListEqual(output_expanded[:, -20:].tolist(), output[:, -20:].tolist())
Source file: transformers/tests/models/llava_next/test_modeling_llava_next.py (repo_id: transformers, token_count: 10995)
# coding=utf-8 # Copyright 2018 LXMERT Authors, The Hugging Face Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import unittest import numpy as np from transformers import LxmertConfig, is_tf_available, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, ) if is_tf_available(): import tensorflow as tf class LxmertModelTester: def __init__( self, parent, vocab_size=300, hidden_size=28, num_attention_heads=2, num_labels=2, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, num_qa_labels=30, num_object_labels=16, num_attr_labels=4, num_visual_features=10, l_layers=2, x_layers=1, r_layers=1, visual_feat_dim=128, visual_pos_dim=4, visual_loss_normalizer=6.67, seq_length=20, batch_size=4, is_training=True, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, use_token_type_ids=True, use_lang_mask=True, output_attentions=False, output_hidden_states=False, scope=None, ): self.parent = parent self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.num_labels = num_labels self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.pad_token_id = pad_token_id self.num_qa_labels = num_qa_labels self.num_object_labels = num_object_labels self.num_attr_labels = num_attr_labels self.l_layers = l_layers self.x_layers = x_layers self.r_layers = r_layers self.visual_feat_dim = visual_feat_dim self.visual_pos_dim = visual_pos_dim self.visual_loss_normalizer = visual_loss_normalizer self.seq_length = seq_length self.batch_size = batch_size self.is_training = is_training self.use_lang_mask = use_lang_mask self.task_matched = task_matched self.task_mask_lm = task_mask_lm self.task_obj_predict = task_obj_predict self.task_qa = task_qa self.visual_obj_loss = visual_obj_loss self.visual_attr_loss = visual_attr_loss self.visual_feat_loss = visual_feat_loss self.num_visual_features = num_visual_features self.use_token_type_ids = use_token_type_ids self.output_attentions = output_attentions self.output_hidden_states = output_hidden_states self.scope = scope 
self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers} def prepare_config_and_inputs(self): output_attentions = self.output_attentions input_ids = ids_tensor([self.batch_size, self.seq_length], vocab_size=self.vocab_size) visual_feats = torch.rand(self.batch_size, self.num_visual_features, self.visual_feat_dim, device=torch_device) bounding_boxes = torch.rand(self.batch_size, self.num_visual_features, 4, device=torch_device) input_mask = None if self.use_lang_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) obj_labels = None if self.task_obj_predict: obj_labels = {} if self.visual_attr_loss and self.task_obj_predict: obj_labels["attr"] = ( ids_tensor([self.batch_size, self.num_visual_features], self.num_attr_labels), ids_tensor([self.batch_size, self.num_visual_features], self.num_attr_labels), ) if self.visual_feat_loss and self.task_obj_predict: obj_labels["feat"] = ( ids_tensor( [self.batch_size, self.num_visual_features, self.visual_feat_dim], self.num_visual_features ), ids_tensor([self.batch_size, self.num_visual_features], self.num_visual_features), ) if self.visual_obj_loss and self.task_obj_predict: obj_labels["obj"] = ( ids_tensor([self.batch_size, self.num_visual_features], self.num_object_labels), ids_tensor([self.batch_size, self.num_visual_features], self.num_object_labels), ) ans = None if self.task_qa: ans = ids_tensor([self.batch_size], self.num_qa_labels) masked_lm_labels = None if self.task_mask_lm: masked_lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) matched_label = None if self.task_matched: matched_label = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return ( config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ) def get_config(self): return LxmertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_attention_heads=self.num_attention_heads, num_labels=self.num_labels, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, layer_norm_eps=self.layer_norm_eps, pad_token_id=self.pad_token_id, num_qa_labels=self.num_qa_labels, num_object_labels=self.num_object_labels, num_attr_labels=self.num_attr_labels, l_layers=self.l_layers, x_layers=self.x_layers, r_layers=self.r_layers, visual_feat_dim=self.visual_feat_dim, visual_pos_dim=self.visual_pos_dim, visual_loss_normalizer=self.visual_loss_normalizer, task_matched=self.task_matched, task_mask_lm=self.task_mask_lm, task_obj_predict=self.task_obj_predict, task_qa=self.task_qa, visual_obj_loss=self.visual_obj_loss, visual_attr_loss=self.visual_attr_loss, visual_feat_loss=self.visual_feat_loss, output_attentions=self.output_attentions, output_hidden_states=self.output_hidden_states, ) def create_and_check_lxmert_model( self, config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ): model = LxmertModel(config=config) model.to(torch_device) model.eval() result = model( input_ids, visual_feats, bounding_boxes, 
token_type_ids=token_type_ids, attention_mask=input_mask, output_attentions=output_attentions, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, output_attentions=not output_attentions, ) result = model(input_ids, visual_feats, bounding_boxes, return_dict=False) result = model(input_ids, visual_feats, bounding_boxes, return_dict=True) self.parent.assertEqual(result.language_output.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual( result.vision_output.shape, (self.batch_size, self.num_visual_features, self.hidden_size) ) self.parent.assertEqual(result.pooled_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_lxmert_for_question_answering( self, config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ): model = LxmertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, labels=ans, output_attentions=output_attentions, ) result = model(input_ids, visual_feats, bounding_boxes, labels=ans) result = model( input_ids, visual_feats, bounding_boxes, labels=ans, token_type_ids=token_type_ids, attention_mask=input_mask, output_attentions=output_attentions, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, labels=ans, output_attentions=not output_attentions, ) self.parent.assertEqual(result.question_answering_score.shape, (self.batch_size, self.num_qa_labels)) def create_and_check_lxmert_for_pretraining( self, config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ): model = LxmertForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, obj_labels=obj_labels, matched_label=matched_label, ans=ans, output_attentions=output_attentions, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, output_attentions=not output_attentions, return_dict=False, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, obj_labels=obj_labels, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, matched_label=matched_label, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, ans=ans, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, obj_labels=obj_labels, matched_label=matched_label, ans=ans, output_attentions=not output_attentions, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def resize_lxmert_num_qa_labels( self, config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ): start_labels = 
config.num_qa_labels num_large_labels = config.num_qa_labels * 2 num_small_labels = int(config.num_qa_labels * 2) less_labels_ans = ids_tensor([self.batch_size], num_small_labels) more_labels_ans = ids_tensor([self.batch_size], num_large_labels) model_pretrain = LxmertForPreTraining(config=config).to(torch_device) model_qa = LxmertForQuestionAnswering(config=config).to(torch_device) config.num_labels = num_small_labels end_labels = config.num_labels result_pretrain = model_pretrain( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, ans=ans, ) result_qa = model_qa( input_ids, visual_feats, bounding_boxes, labels=ans, token_type_ids=token_type_ids, attention_mask=input_mask, ) model_pretrain.resize_num_qa_labels(num_small_labels) model_qa.resize_num_qa_labels(num_small_labels) result_pretrain_less = model_pretrain( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, ans=less_labels_ans, ) result_qa_less = model_qa( input_ids, visual_feats, bounding_boxes, labels=less_labels_ans, token_type_ids=token_type_ids, attention_mask=input_mask, ) model_pretrain.resize_num_qa_labels(num_large_labels) model_qa.resize_num_qa_labels(num_large_labels) result_pretrain_more = model_pretrain( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, ans=more_labels_ans, ) result_qa_more = model_qa( input_ids, visual_feats, bounding_boxes, labels=more_labels_ans, token_type_ids=token_type_ids, attention_mask=input_mask, ) model_qa_labels = model_qa.num_qa_labels self.parent.assertNotEqual(start_labels, end_labels) self.parent.assertNotEqual(model_qa_labels, start_labels) self.parent.assertEqual(result_qa.question_answering_score.shape, (self.batch_size, start_labels)) self.parent.assertEqual(result_pretrain.question_answering_score.shape, (self.batch_size, start_labels)) self.parent.assertEqual(result_qa_less.question_answering_score.shape, (self.batch_size, num_small_labels)) self.parent.assertEqual( result_pretrain_less.question_answering_score.shape, (self.batch_size, num_small_labels) ) self.parent.assertEqual(result_qa_more.question_answering_score.shape, (self.batch_size, num_large_labels)) self.parent.assertEqual( result_pretrain_more.question_answering_score.shape, (self.batch_size, num_large_labels) ) def prepare_config_and_inputs_for_common(self, return_obj_labels=False): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "visual_feats": visual_feats, "visual_pos": bounding_boxes, "token_type_ids": token_type_ids, "attention_mask": input_mask, } if return_obj_labels: inputs_dict["obj_labels"] = obj_labels else: config.task_obj_predict = False return config, inputs_dict @require_torch class LxmertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (LxmertModel, LxmertForPreTraining, LxmertForQuestionAnswering) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": LxmertModel, "question-answering": LxmertForQuestionAnswering} if is_torch_available() else {} ) fx_compatible = True test_head_masking = False test_pruning = False test_torchscript = False # overwrite function because qa models takes different input label shape def _prepare_for_class(self, inputs_dict, model_class, 
return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): # special case for models like BERT that use multi-loss training for PreTraining inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = LxmertModelTester(self) self.config_tester = ConfigTester(self, config_class=LxmertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_lxmert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lxmert_model(*config_and_inputs) def test_lxmert_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lxmert_for_question_answering(*config_and_inputs) def test_lxmert_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lxmert_for_pretraining(*config_and_inputs) def test_lxmert_question_answering_labels_resize(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.resize_lxmert_num_qa_labels(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "unc-nlp/lxmert-base-uncased" model = LxmertModel.from_pretrained(model_name) model.to(torch_device) self.assertIsNotNone(model) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() seq_len = getattr(self.model_tester, "seq_length", None) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) language_attentions, vision_attentions, cross_encoder_attentions = (outputs[-3], outputs[-2], outputs[-1]) self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers["language"]) self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers["vision"]) self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers["cross_encoder"]) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) language_attentions, vision_attentions, cross_encoder_attentions = (outputs[-3], outputs[-2], outputs[-1]) self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers["language"]) self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers["vision"]) self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers["cross_encoder"]) 
attentions = [language_attentions, vision_attentions, cross_encoder_attentions] attention_shapes = [ [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], [ self.model_tester.num_attention_heads, self.model_tester.num_visual_features, self.model_tester.num_visual_features, ], [self.model_tester.num_attention_heads, encoder_key_length, self.model_tester.num_visual_features], ] for attention, attention_shape in zip(attentions, attention_shapes): self.assertListEqual(list(attention[0].shape[-3:]), attention_shape) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) # 2 hidden states were added self.assertEqual(out_len + 2, len(outputs)) language_attentions, vision_attentions, cross_encoder_attentions = (outputs[-3], outputs[-2], outputs[-1]) self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers["language"]) self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers["vision"]) self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers["cross_encoder"]) attentions = [language_attentions, vision_attentions, cross_encoder_attentions] attention_shapes = [ [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], [ self.model_tester.num_attention_heads, self.model_tester.num_visual_features, self.model_tester.num_visual_features, ], [self.model_tester.num_attention_heads, encoder_key_length, self.model_tester.num_visual_features], ] for attention, attention_shape in zip(attentions, attention_shapes): self.assertListEqual(list(attention[0].shape[-3:]), attention_shape) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) language_hidden_states, vision_hidden_states = outputs[-2], outputs[-1] self.assertEqual(len(language_hidden_states), self.model_tester.num_hidden_layers["language"] + 1) self.assertEqual(len(vision_hidden_states), self.model_tester.num_hidden_layers["vision"] + 1) seq_length = self.model_tester.seq_length num_visual_features = self.model_tester.num_visual_features self.assertListEqual( list(language_hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) self.assertListEqual( list(vision_hidden_states[0].shape[-2:]), [num_visual_features, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, 
model_class) outputs = model(**inputs) hidden_states_lang = outputs.language_hidden_states[0] attentions_lang = outputs.language_attentions[0] hidden_states_vision = outputs.vision_hidden_states[0] attentions_vision = outputs.vision_attentions[0] hidden_states_lang.retain_grad() attentions_lang.retain_grad() hidden_states_vision.retain_grad() attentions_vision.retain_grad() outputs.language_output.flatten()[0].backward(retain_graph=True) outputs.vision_output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states_lang.grad) self.assertIsNotNone(attentions_vision.grad) self.assertIsNotNone(hidden_states_vision.grad) self.assertIsNotNone(attentions_vision.grad) def prepare_tf_inputs_from_pt_inputs(self, pt_inputs_dict): tf_inputs_dict = {} for key, value in pt_inputs_dict.items(): # skip key that does not exist in tf if isinstance(value, dict): tf_inputs_dict[key] = self.prepare_pt_inputs_from_tf_inputs(value) elif isinstance(value, (list, tuple)): tf_inputs_dict[key] = (self.prepare_pt_inputs_from_tf_inputs(iter_value) for iter_value in value) elif isinstance(value, bool): tf_inputs_dict[key] = value elif key == "input_values": tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.float32) elif key == "pixel_values": tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.float32) elif key == "input_features": tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.float32) # other general float inputs elif value.is_floating_point(): tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.float32) else: tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.int32) return tf_inputs_dict @unittest.skip(reason="No support for low_cpu_mem_usage=True.") def test_save_load_low_cpu_mem_usage(self): pass @unittest.skip(reason="No support for low_cpu_mem_usage=True.") def test_save_load_low_cpu_mem_usage_checkpoints(self): pass @unittest.skip(reason="No support for low_cpu_mem_usage=True.") def test_save_load_low_cpu_mem_usage_no_safetensors(self): pass @unittest.skip( reason="This architecure has tied weights by default and there is no way to remove it, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245" ) def test_load_save_without_tied_weights(self): pass @require_torch class LxmertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = LxmertModel.from_pretrained("unc-nlp/lxmert-base-uncased") input_ids = torch.tensor([[101, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 102]]) num_visual_features = 10 _, visual_feats = np.random.seed(0), np.random.rand(1, num_visual_features, model.config.visual_feat_dim) _, visual_pos = np.random.seed(0), np.random.rand(1, num_visual_features, 4) visual_feats = torch.as_tensor(visual_feats, dtype=torch.float32) visual_pos = torch.as_tensor(visual_pos, dtype=torch.float32) output = model(input_ids, visual_feats=visual_feats, visual_pos=visual_pos)[0] expected_shape = torch.Size([1, 11, 768]) self.assertEqual(expected_shape, output.shape) expected_slice = torch.tensor( [[[0.2417, -0.9807, 0.1480], [1.2541, -0.8320, 0.5112], [1.4070, -1.1052, 0.6990]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
Source file: transformers/tests/models/lxmert/test_modeling_lxmert.py (repo_id: transformers, token_count: 15327)
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers.testing_utils import require_bs4 from transformers.utils import is_bs4_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_bs4_available(): from transformers import MarkupLMFeatureExtractor class MarkupLMFeatureExtractionTester(unittest.TestCase): def __init__(self, parent): self.parent = parent def prepare_feat_extract_dict(self): return {} def get_html_strings(): html_string_1 = """<HTML> <HEAD> <TITLE>sample document</TITLE> </HEAD> <BODY BGCOLOR="FFFFFF"> <HR> <a href="http://google.com">Goog</a> <H1>This is one header</H1> <H2>This is a another Header</H2> <P>Travel from <P> <B>SFO to JFK</B> <BR> <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B> <HR> <div style="color:#0000FF"> <h3>Traveler <b> name </b> is <p> John Doe </p> </div>""" html_string_2 = """ <!DOCTYPE html> <html> <body> <h1>My First Heading</h1> <p>My first paragraph.</p> </body> </html> """ return [html_string_1, html_string_2] @require_bs4 class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None def setUp(self): self.feature_extract_tester = MarkupLMFeatureExtractionTester(self) @property def feat_extract_dict(self): return self.feature_extract_tester.prepare_feat_extract_dict() def test_call(self): # Initialize feature_extractor feature_extractor = self.feature_extraction_class() # Test not batched input html_string = get_html_strings()[0] encoding = feature_extractor(html_string) # fmt: off expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']] expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']] # fmt: on self.assertEqual(encoding.nodes, expected_nodes) self.assertEqual(encoding.xpaths, expected_xpaths) # Test batched html_strings = get_html_strings() encoding = feature_extractor(html_strings) # fmt: off expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']] expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']] self.assertEqual(len(encoding.nodes), 2) self.assertEqual(len(encoding.xpaths), 2) self.assertEqual(encoding.nodes, expected_nodes) self.assertEqual(encoding.xpaths, expected_xpaths)
Source file: transformers/tests/models/markuplm/test_feature_extraction_markuplm.py (repo_id: transformers, token_count: 1485)
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the MusicGen processor.""" import random import shutil import tempfile import unittest import numpy as np from transformers import T5Tokenizer, T5TokenizerFast from transformers.testing_utils import require_sentencepiece, require_torch from transformers.utils.import_utils import is_speech_available if is_speech_available(): from transformers import EncodecFeatureExtractor, MusicgenProcessor global_rng = random.Random() # Copied from tests.models.whisper.test_feature_extraction_whisper.floats_list def floats_list(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values @require_torch @require_sentencepiece class MusicgenProcessorTest(unittest.TestCase): def setUp(self): self.checkpoint = "facebook/musicgen-small" self.tmpdirname = tempfile.mkdtemp() def get_tokenizer(self, **kwargs): return T5Tokenizer.from_pretrained(self.checkpoint, **kwargs) def get_feature_extractor(self, **kwargs): return EncodecFeatureExtractor.from_pretrained(self.checkpoint, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() feature_extractor = self.get_feature_extractor() processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) processor.save_pretrained(self.tmpdirname) processor = MusicgenProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, T5TokenizerFast) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertIsInstance(processor.feature_extractor, EncodecFeatureExtractor) def test_save_load_pretrained_additional_features(self): processor = MusicgenProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0) processor = MusicgenProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, T5TokenizerFast) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor, EncodecFeatureExtractor) def test_feature_extractor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = 
floats_list((3, 1000)) input_feat_extract = feature_extractor(raw_speech, return_tensors="np") input_processor = processor(raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "This is a test string" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_tokenizer_decode(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(sequences=predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", ) def test_decode_audio(self): feature_extractor = self.get_feature_extractor(padding_side="left") tokenizer = self.get_tokenizer() processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = [floats_list((1, x))[0] for x in range(5, 20, 5)] padding_mask = processor(raw_speech).padding_mask generated_speech = np.asarray(floats_list((3, 20)))[:, None, :] decoded_audios = processor.batch_decode(generated_speech, padding_mask=padding_mask) self.assertIsInstance(decoded_audios, list) for audio in decoded_audios: self.assertIsInstance(audio, np.ndarray) self.assertTrue(decoded_audios[0].shape == (1, 10)) self.assertTrue(decoded_audios[1].shape == (1, 15)) self.assertTrue(decoded_audios[2].shape == (1, 20))
Source file: transformers/tests/models/musicgen/test_processing_musicgen.py (repo_id: transformers, token_count: 2474)
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None): if attention_mask is None: attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class TFOPTModelTester: config_cls = OPTConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.embed_dim = embed_dim self.word_embed_proj_dim = word_embed_proj_dim self.is_encoder_decoder = False def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size) eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1) input_ids = tf.concat([input_ids, eos_tensor], axis=1) config = self.config_cls( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates, ) inputs_dict = prepare_opt_inputs_dict(config, input_ids) return config, inputs_dict def 
check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFOPTModel(config=config) input_ids = inputs_dict["input_ids"] input_ids = input_ids[:1, :] attention_mask = inputs_dict["attention_mask"][:1, :] self.batch_size = 1 # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extend to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8) # append to next input_ids and attention mask next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) @require_tf class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else () pipeline_model_mapping = ( {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {} ) is_encoder_decoder = False test_pruning = False test_onnx = False onnx_min_opset = 10 def setUp(self): self.model_tester = TFOPTModelTester(self) self.config_tester = ConfigTester(self, config_class=OPTConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs) def test_resize_token_embeddings(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(model, embedding_layer): if hasattr(embedding_layer, "weight"): return embedding_layer.weight else: # Here we build the word embedding weights if they don't exist yet, # then retry fetching the attribute once they are built. model.build_in_name_scope() if hasattr(embedding_layer, "weight"): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10]: # build the embeddings model = model_class(config=config) old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings()) old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings()) # reshape the embeddings model.resize_token_embeddings(size) new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings()) new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings()) # check that the resized embeddings size matches the desired size. 
assert_size = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0], assert_size) # check that weights remain the same after resizing models_equal = True for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0], assert_size) models_equal = True for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) def _long_tensor(tok_lst): return tf.constant(tok_lst, dtype=tf.int32) @require_tf class TFOPTHeadTests(unittest.TestCase): vocab_size = 99 def _get_config_and_data(self): eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2 input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1) batch_size = input_ids.shape[0] config = OPTConfig( vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) return config, input_ids, batch_size @require_sentencepiece @require_tf class OPTModelIntegrationTests(unittest.TestCase): @slow def test_inference_no_head(self): model = TFOPTModel.from_pretrained("facebook/opt-350m") input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) attention_mask = tf.not_equal(input_ids, model.config.pad_token_id) with tf.GradientTape(): output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state expected_shape = (1, 11, 512) self.assertEqual(output.shape, expected_shape) expected_slice = tf.constant( [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] ) self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3)) xla_generate = tf.function(model, jit_compile=True) output = xla_generate(input_ids, attention_mask)[0] self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2)) @require_tf @slow class TFOPTEmbeddingsTest(unittest.TestCase): def setUp(self): super().setUp() self.path_model = "facebook/opt-350m" def test_logits(self): model = TFOPTForCausalLM.from_pretrained(self.path_model) tokenizer = GPT2Tokenizer.from_pretrained(self.path_model) prompts = [ "Today is a beautiful day and I want to", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False) logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1) logits_meta = tf.constant( [ [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670], [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822], [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703], [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477], ] ) self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4)) xla_generate = tf.function(model, jit_compile=True) logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1) self.assertTrue(np.allclose(logits, logits_meta, 
atol=1e-4)) @require_tf @slow class TFOPTGenerationTest(unittest.TestCase): @property def prompts(self): return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def test_generation_pre_attn_layer_norm(self): model_id = "facebook/opt-125m" EXPECTED_OUTPUTS = [ "Today is a beautiful day and I want to", "In the city of New York, the city", "Paris is the capital of France and the capital", "Computers and mobile phones have taken over the", ] predicted_outputs = [] tokenizer = GPT2Tokenizer.from_pretrained(model_id) model = TFOPTForCausalLM.from_pretrained(model_id) for prompt in self.prompts: input_ids = tokenizer(prompt, return_tensors="tf").input_ids generated_ids = model.generate(input_ids, max_length=10) generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) predicted_outputs += generated_string self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS) def test_batch_generation(self): model_id = "facebook/opt-350m" tokenizer = GPT2Tokenizer.from_pretrained(model_id) model = TFOPTForCausalLM.from_pretrained(model_id) tokenizer.padding_side = "left" # use different length sentences to test batching sentences = [ "Hello, my dog is a little", "Today, I", ] inputs = tokenizer(sentences, return_tensors="tf", padding=True) input_ids = inputs["input_ids"] outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"]) inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids output_non_padded = model.generate(input_ids=inputs_non_padded) num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs["attention_mask"][-1], tf.int64) ) inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings) batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) expected_output_sentence = [ "Hello, my dog is a little bit of a dork.\nI'm a little bit", "Today, I was in the middle of a conversation with a friend about the", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence]) def test_generation_post_attn_layer_norm(self): model_id = "facebook/opt-350m" EXPECTED_OUTPUTS = [ "Today is a beautiful day and I want to", "In the city of San Francisco, the city", "Paris is the capital of France and the capital", "Computers and mobile phones have taken over the", ] predicted_outputs = [] tokenizer = GPT2Tokenizer.from_pretrained(model_id) model = TFOPTForCausalLM.from_pretrained(model_id) for prompt in self.prompts: input_ids = tokenizer(prompt, return_tensors="tf").input_ids generated_ids = model.generate(input_ids, max_length=10) generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) predicted_outputs += generated_string self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
transformers/tests/models/opt/test_modeling_tf_opt.py/0
{ "file_path": "transformers/tests/models/opt/test_modeling_tf_opt.py", "repo_id": "transformers", "token_count": 7496 }
417
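The TF OPT tests above exercise batched generation with left padding, which is easy to get wrong for decoder-only models. A minimal, illustrative sketch of that usage pattern follows; it is not part of the dataset, and the checkpoint name, prompts, and generation calls are taken directly from TFOPTGenerationTest.test_batch_generation above.

from transformers import GPT2Tokenizer, TFOPTForCausalLM

model_id = "facebook/opt-350m"
tokenizer = GPT2Tokenizer.from_pretrained(model_id)
model = TFOPTForCausalLM.from_pretrained(model_id)

# OPT is decoder-only, so prompts of different lengths must be left-padded
# for batched generation to match per-prompt (unpadded) generation.
tokenizer.padding_side = "left"

sentences = ["Hello, my dog is a little", "Today, I"]
inputs = tokenizer(sentences, return_tensors="tf", padding=True)

outputs = model.generate(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))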
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team, The Microsoft Research team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import tempfile import unittest from transformers import ProphetNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( ProphetNetDecoder, ProphetNetEncoder, ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel, ProphetNetTokenizer, ) from transformers.modeling_outputs import BaseModelOutput class ProphetNetModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, hidden_size=16, encoder_seq_length=7, decoder_seq_length=9, # For common tests is_training=True, use_attention_mask=True, use_labels=True, decoder_start_token_id=0, encoder_ffn_dim=32, num_encoder_layers=2, num_encoder_attention_heads=4, decoder_ffn_dim=32, num_decoder_layers=2, num_decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, ngram=2, num_buckets=32, relative_max_distance=128, disable_ngram_loss=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_decoder_layers self.num_encoder_layers = num_encoder_layers self.num_decoder_layers = num_decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_ffn_dim = encoder_ffn_dim self.num_attention_heads = num_decoder_attention_heads self.num_encoder_attention_heads = num_encoder_attention_heads self.num_decoder_attention_heads = num_decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.ngram = ngram self.num_buckets = num_buckets self.relative_max_distance = relative_max_distance self.disable_ngram_loss = disable_ngram_loss self.max_position_embeddings = max_position_embeddings self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 7 self.num_hidden_states_types = 3 # encoder, decoder_main, decoder_ngram self.decoder_attention_idx = 2 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: 
attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = self.get_config() return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def get_config(self): return ProphetNetConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_encoder_layers=self.num_encoder_layers, num_decoder_layers=self.num_decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_ffn_dim=self.encoder_ffn_dim, num_encoder_attention_heads=self.num_encoder_attention_heads, num_decoder_attention_heads=self.num_decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ngram=self.ngram, num_buckets=self.num_buckets, relative_max_distance=self.relative_max_distance, disable_ngram_loss=self.disable_ngram_loss, max_position_embeddings=self.max_position_embeddings, is_encoder_decoder=self.is_encoder_decoder, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.encoder_seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) return ( config, decoder_input_ids, decoder_attention_mask, encoder_hidden_states, encoder_attention_mask, lm_labels, ) def check_prepare_lm_labels_via_shift_left( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = ProphetNetModel(config=config) model.to(torch_device) model.eval() # make sure that lm_labels are correctly padded from the right lm_labels.masked_fill_((lm_labels == self.decoder_start_token_id), self.eos_token_id) # add casaul pad token mask triangular_mask = torch.tril(lm_labels.new_ones(lm_labels.shape)).logical_not() lm_labels.masked_fill_(triangular_mask, self.pad_token_id) decoder_input_ids = model._shift_right(lm_labels) for i, (decoder_input_ids_slice, lm_labels_slice) in enumerate(zip(decoder_input_ids, lm_labels)): # first item self.parent.assertEqual(decoder_input_ids_slice[0].item(), self.decoder_start_token_id) if i < decoder_input_ids_slice.shape[-1]: if i < decoder_input_ids.shape[-1] - 1: # items before diagonal self.parent.assertListEqual( decoder_input_ids_slice[1 : i + 1].tolist(), lm_labels_slice[:i].tolist() ) # pad items after diagonal if i < decoder_input_ids.shape[-1] - 2: self.parent.assertListEqual( decoder_input_ids_slice[i + 2 :].tolist(), lm_labels_slice[i + 1 : -1].tolist() ) else: # all items after square self.parent.assertListEqual(decoder_input_ids_slice[1:].tolist(), lm_labels_slice[:-1].tolist()) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = ProphetNetModel(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state decoder_past = result.past_key_values encoder_output = 
result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size)) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(decoder_past), config.num_decoder_layers) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0]), 4) # cross-attention + uni-directional self-attention def create_and_check_with_lm_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = ProphetNetForConditionalGeneration(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertEqual(len(outputs), 5) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_causal_lm_decoder( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = ProphetNetForCausalLM(config=config).to(torch_device).eval() outputs = model( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertEqual(len(outputs), 4) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_generate_with_past_key_value_states( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = ProphetNetForConditionalGeneration(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_decoder_generate_with_past_key_value_states( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = ProphetNetForCausalLM(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=10, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=10, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_model_fp16_forward( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = ProphetNetModel(config=config).to(torch_device).half().eval() output = model(input_ids, decoder_input_ids=input_ids, attention_mask=attention_mask)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) def create_and_check_encoder_decoder_shared_weights( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): for model_class in [ProphetNetModel, ProphetNetForConditionalGeneration]: torch.manual_seed(0) model = model_class(config=config).to(torch_device).eval() # load 
state dict copies weights but does not tie them if model_class == ProphetNetForConditionalGeneration: model.prophetnet.encoder.load_state_dict(model.prophetnet.decoder.state_dict(), strict=False) else: model.encoder.load_state_dict(model.decoder.state_dict(), strict=False) torch.manual_seed(0) tied_config = copy.deepcopy(config) tied_config.tie_encoder_decoder = True tied_model = model_class(config=tied_config).to(torch_device).eval() model_result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that models has less parameters self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4 ) ) # check that outputs after saving and loading are equal with tempfile.TemporaryDirectory() as tmpdirname: tied_model.save_pretrained(tmpdirname) tied_model = model_class.from_pretrained(tmpdirname) tied_model.to(torch_device) tied_model.eval() # check that models has less parameters self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4, ) ) def check_fast_integration( self, config, *args, ): input_ids = torch.tensor([[7, 4, 78, 0, 24, 52, 43]], device=torch_device, dtype=torch.long) decoder_input_ids = torch.tensor([[12, 62, 25, 11, 47, 15, 14]], device=torch_device, dtype=torch.long) attention_mask = torch.tensor([[1, 1, 1, 0, 1, 0, 0]], device=torch_device, dtype=torch.long) decoder_attention_mask = torch.tensor([[1, 1, 1, 0, 0, 1, 0]], device=torch_device, dtype=torch.long) lm_labels = torch.tensor([[62, 25, 11, 47, 15, 14, 24]], device=torch_device, dtype=torch.long) torch.manual_seed(0) config.ngram = 4 model = ProphetNetForConditionalGeneration(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertTrue(torch.allclose(result.loss, torch.tensor(4.5892, device=torch_device), atol=1e-3)) expected_logit_slice = torch.tensor( [-0.0184, 0.0758, -0.0543, -0.0093, 0.0050, -0.0660, -0.1453], device=torch_device ) self.parent.assertTrue(torch.allclose(result.logits[0, :, 1], expected_logit_slice, atol=1e-3)) def check_model_with_attn_mask(self, config, input_ids, decoder_input_ids, *args): model = ProphetNetModel(config=config) model.to(torch_device) model.eval() outputs_no_mask = model(input_ids=input_ids[:, :5], decoder_input_ids=decoder_input_ids[:, :5]) attention_mask = torch.ones_like(input_ids) decoder_attention_mask = torch.ones_like(decoder_input_ids) attention_mask[:, 5:] 
= 0 outputs_with_mask = model( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) # check encoder self.parent.assertTrue( torch.allclose( outputs_no_mask.encoder_last_hidden_state[0, :, 0], outputs_with_mask.encoder_last_hidden_state[0, :5, 0], atol=1e-3, ) ) # check decoder # main stream self.parent.assertTrue( torch.allclose( outputs_no_mask.last_hidden_state[0, :, 0], outputs_with_mask.last_hidden_state[0, :5, 0], atol=1e-3 ) ) # predict stream self.parent.assertTrue( torch.allclose( outputs_no_mask.last_hidden_state_ngram[0, :5, 0], outputs_with_mask.last_hidden_state_ngram[0, :5, 0], atol=1e-2, ) ) def check_causal_lm_from_pretrained( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, *args ): model = ProphetNetForConditionalGeneration(config).to(torch_device).eval() with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) decoder = ProphetNetForCausalLM.from_pretrained(tmp_dirname).to(torch_device) encoder_hidden_states = model.prophetnet.encoder(input_ids).last_hidden_state model_outputs = model( encoder_outputs=BaseModelOutput(last_hidden_state=encoder_hidden_states), decoder_input_ids=decoder_input_ids, ) dec_outputs = decoder(encoder_hidden_states=encoder_hidden_states, input_ids=decoder_input_ids) self.parent.assertTrue( torch.allclose( model_outputs.logits[0, :5], dec_outputs.logits[0, :5], atol=1e-3, ) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "use_cache": False, } return config, inputs_dict class ProphetNetStandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, hidden_size=16, encoder_seq_length=7, decoder_seq_length=7, # For common tests is_training=True, is_decoder=True, use_attention_mask=True, add_cross_attention=False, use_cache=False, use_labels=True, decoder_start_token_id=0, encoder_ffn_dim=32, num_encoder_layers=2, num_encoder_attention_heads=4, decoder_ffn_dim=32, num_decoder_layers=2, num_decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, ngram=2, num_buckets=32, relative_max_distance=128, disable_ngram_loss=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_decoder_layers self.num_encoder_layers = num_encoder_layers self.num_decoder_layers = num_decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_ffn_dim = encoder_ffn_dim self.num_attention_heads = num_decoder_attention_heads self.num_encoder_attention_heads = num_encoder_attention_heads self.num_decoder_attention_heads = num_decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.ngram = ngram self.num_buckets = num_buckets 
self.relative_max_distance = relative_max_distance self.use_cache = use_cache self.disable_ngram_loss = disable_ngram_loss self.max_position_embeddings = max_position_embeddings self.add_cross_attention = add_cross_attention self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.num_hidden_states_types = 2 # decoder_main, decoder_ngram self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) config = ProphetNetConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_encoder_layers=self.num_encoder_layers, num_decoder_layers=self.num_decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_ffn_dim=self.encoder_ffn_dim, num_encoder_attention_heads=self.num_encoder_attention_heads, num_decoder_attention_heads=self.num_decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ngram=self.ngram, num_buckets=self.num_buckets, relative_max_distance=self.relative_max_distance, disable_ngram_loss=self.disable_ngram_loss, max_position_embeddings=self.max_position_embeddings, add_cross_attention=self.add_cross_attention, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, lm_labels, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, attention_mask, lm_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.encoder_seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, lm_labels, ) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = ProphetNetDecoder(config=config).to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, attention_mask, 
lm_labels, ): model = ProphetNetDecoder(config=config).to(torch_device).eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict class ProphetNetStandaloneEncoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, hidden_size=16, encoder_seq_length=7, decoder_seq_length=7, # For common tests is_training=True, is_decoder=False, use_attention_mask=True, add_cross_attention=False, use_cache=False, use_labels=True, decoder_start_token_id=0, encoder_ffn_dim=32, num_encoder_layers=2, num_encoder_attention_heads=4, decoder_ffn_dim=32, num_decoder_layers=2, num_decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, num_buckets=32, relative_max_distance=128, disable_ngram_loss=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_decoder_layers self.num_encoder_layers = num_encoder_layers self.num_decoder_layers = num_decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_ffn_dim = encoder_ffn_dim self.num_attention_heads = num_decoder_attention_heads self.num_encoder_attention_heads = num_encoder_attention_heads self.num_decoder_attention_heads = num_decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.num_buckets = num_buckets self.relative_max_distance = relative_max_distance self.use_cache = use_cache 
self.disable_ngram_loss = disable_ngram_loss self.max_position_embeddings = max_position_embeddings self.add_cross_attention = add_cross_attention self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 1 self.num_hidden_states_types = 1 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) config = ProphetNetConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_encoder_layers=self.num_encoder_layers, num_decoder_layers=self.num_decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_ffn_dim=self.encoder_ffn_dim, num_encoder_attention_heads=self.num_encoder_attention_heads, num_decoder_attention_heads=self.num_decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, num_buckets=self.num_buckets, relative_max_distance=self.relative_max_distance, disable_ngram_loss=self.disable_ngram_loss, max_position_embeddings=self.max_position_embeddings, add_cross_attention=self.add_cross_attention, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class ProphetNetModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (ProphetNetModel, ProphetNetForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (ProphetNetForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": ProphetNetModel, "summarization": ProphetNetForConditionalGeneration, "text-generation": ProphetNetForCausalLM, "text2text-generation": ProphetNetForConditionalGeneration, "translation": ProphetNetForConditionalGeneration, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False is_encoder_decoder = True # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `ProphetNetConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def setUp(self): self.model_tester = ProphetNetModelTester(self) self.config_tester = ConfigTester(self, config_class=ProphetNetConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_lm_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_lm_head(*config_and_inputs) def test_only_decoder_causal_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_decoder(*config_and_inputs) def test_fast_integration(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_fast_integration(*config_and_inputs) def test_shared_weights(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_encoder_decoder_shared_weights(*config_and_inputs) def test_shift_labels_via_shift_left(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs) @unittest.skip(reason="Flaky test with no simple resolution. TODO Fix me @patrickvonplaten") def test_decoder_model_generate(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_generate_with_past_key_value_states(*config_and_inputs) def test_encoder_decoder_model_generate(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_generate_with_past_key_value_states(*config_and_inputs) def test_attn_mask_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_model_with_attn_mask(*config_and_inputs) def test_config_save(self): config = self.model_tester.prepare_config_and_inputs()[0] config.add_cross_attention = False with tempfile.TemporaryDirectory() as tmp_dirname: config.save_pretrained(tmp_dirname) config = ProphetNetConfig.from_pretrained(tmp_dirname) self.assertFalse(config.add_cross_attention) def test_causal_lm_from_pretrained(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_causal_lm_from_pretrained(*config_and_inputs) @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) # methods overwrite method in `test_modeling_common.py` def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False model = model_class(config) 
model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) correct_outlen = 7 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, (self.model_tester.ngram + 1) * decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(self_attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] 
encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_attentions = outputs.encoder_attentions[0] encoder_hidden_states.retain_grad() encoder_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(encoder_attentions.grad) @unittest.skip(reason="Generating with head_masking has not been implemented for ProphetNet models yet.") def test_generate_with_head_masking(self): pass @require_torch class ProphetNetStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (ProphetNetDecoder, ProphetNetForCausalLM) if is_torch_available() else () all_generative_model_classes = (ProphetNetForCausalLM,) if is_torch_available() else () test_pruning = False test_resize_embeddings = False is_encoder_decoder = False def setUp(self): self.model_tester = ProphetNetStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=ProphetNetConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_attn_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) @unittest.skip(reason="Decoder cannot keep gradients") def test_retain_grad_hidden_states_attentions(self): return @require_torch class ProphetNetStandaloneEncoderModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (ProphetNetEncoder,) if is_torch_available() else () test_pruning = False test_resize_embeddings = False is_encoder_decoder = False def setUp(self): self.model_tester = ProphetNetStandaloneEncoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=ProphetNetConfig) def test_config(self): self.config_tester.run_common_tests() @require_torch class ProphetNetModelIntegrationTest(unittest.TestCase): @slow def test_pretrained_checkpoint_hidden_states(self): model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased") model.to(torch_device) # encoder-decoder outputs encoder_ids = torch.tensor( [ [ 2871, 102, 2048, 3176, 2780, 1997, 2871, 26727, 2169, 2097, 12673, 1996, 8457, 2006, 2049, 8240, 2859, 2799, 1012, 2023, 6512, 2038, 2174, 13977, 2195, 25962, 1012, 102, ] ] ).to(torch_device) decoder_prev_ids = torch.tensor([[102, 2129, 2116, 2372, 2024, 2006, 2169, 1997, 2122, 2048, 2780, 1029]]).to( torch_device ) output = model( input_ids=encoder_ids, attention_mask=None, encoder_outputs=None, decoder_input_ids=decoder_prev_ids, ) output_predited_logits = output[0] expected_shape = torch.Size((1, 12, 30522)) self.assertEqual(output_predited_logits.shape, expected_shape) expected_slice = torch.tensor( [[[-7.7729, -8.0343, -8.26001], [-7.74213, -7.8629, -8.6000], [-7.7328, -7.8269, -8.5264]]] ).to(torch_device) # self.assertTrue(torch.allclose(output_predited_logits[:, :3, :3], expected_slice, atol=1e-4)) assert torch.allclose(output_predited_logits[:, :3, :3], expected_slice, atol=1e-4) # encoder outputs encoder_outputs = model.prophetnet.encoder(encoder_ids)[0] expected_encoder_outputs_slice = torch.tensor( [[[-0.2526, -0.1951, -0.2185], [-0.8923, 0.2992, -0.4623], [-0.4585, 0.0165, -0.6652]]] ).to(torch_device) expected_shape_encoder = torch.Size((1, 28, 1024)) 
self.assertEqual(encoder_outputs.shape, expected_shape_encoder) # self.assertTrue(torch.allclose(encoder_outputs[:, :3, :3], expected_encoder_outputs_slice, atol=1e-4)) assert torch.allclose(encoder_outputs[:, :3, :3], expected_encoder_outputs_slice, atol=1e-4) # decoder outputs decoder_outputs = model.prophetnet.decoder(decoder_prev_ids, encoder_hidden_states=encoder_outputs) predicting_streams = decoder_outputs[1].view(1, model.config.ngram, 12, -1) predicting_streams_logits = model.lm_head(predicting_streams) next_first_stream_logits = predicting_streams_logits[:, 0] # self.assertTrue(torch.allclose(next_first_stream_logits[:, :3, :3], expected_slice, atol=1e-4)) assert torch.allclose(next_first_stream_logits[:, :3, :3], expected_slice, atol=1e-4) @slow def test_cnndm_inference(self): model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased-cnndm") model.config.max_length = 512 model.to(torch_device) tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased-cnndm") ARTICLE_TO_SUMMARIZE = ( "USTC was founded in Beijing by the Chinese Academy of Sciences (CAS) in September 1958. The Director of" " CAS, Mr. Guo Moruo was appointed the first president of USTC. USTC's founding mission was to develop a" " high-level science and technology workforce, as deemed critical for development of China's economy," ' defense, and science and technology education. The establishment was hailed as "A Major Event in the' ' History of Chinese Education and Science." CAS has supported USTC by combining most of its institutes' " with the departments of the university. USTC is listed in the top 16 national key universities, becoming" " the youngest national key university.".lower() ) input_ids = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=511, return_tensors="pt").input_ids input_ids = input_ids.to(torch_device) summary_ids = model.generate( input_ids, num_beams=4, length_penalty=1.0, no_repeat_ngram_size=3, early_stopping=True ) EXPECTED_SUMMARIZE_512 = ( "us ##tc was founded by the chinese academy of sciences ( cas ) in 1958 . [X_SEP] us ##tc is listed in the" " top 16 national key universities ." ) generated_titles = [ " ".join(tokenizer.convert_ids_to_tokens(g, skip_special_tokens=True)) for g in summary_ids ] self.assertListEqual( [EXPECTED_SUMMARIZE_512], generated_titles, ) input_ids = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=99, return_tensors="pt").input_ids input_ids = input_ids.to(torch_device) # actually 98 tokens are used. max_length=100 contains bos and eos. summary_ids = model.generate( input_ids, num_beams=4, length_penalty=1.0, no_repeat_ngram_size=3, early_stopping=True ) EXPECTED_SUMMARIZE_100 = ( r"us ##tc was founded in beijing by the chinese academy of sciences ( cas ) in 1958 . [X_SEP] us ##tc " "'" " s founding mission was to develop a high - level science and technology workforce . 
[X_SEP]" ' establishment hailed as " a major event in the history of chinese education and science "' ) generated_titles = [ " ".join(tokenizer.convert_ids_to_tokens(g, skip_special_tokens=True)) for g in summary_ids ] self.assertListEqual( [EXPECTED_SUMMARIZE_100], generated_titles, ) @slow def test_question_gen_inference(self): model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased-squad-qg") model.to(torch_device) tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased-squad-qg") INPUTS = [ "Bill Gates [SEP] Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975.", "1975 [SEP] Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975.", "April 4, 1975 [SEP] Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975.", ] input_ids = tokenizer(INPUTS, truncation=True, padding=True, return_tensors="pt").input_ids input_ids = input_ids.to(torch_device) gen_output = model.generate(input_ids, num_beams=5, early_stopping=True) generated_questions = tokenizer.batch_decode(gen_output, skip_special_tokens=True) EXPECTED_QUESTIONS = [ "along with paul allen, who founded microsoft?", "what year was microsoft founded?", "when was microsoft founded?", ] self.assertListEqual( EXPECTED_QUESTIONS, generated_questions, )
transformers/tests/models/prophetnet/test_modeling_prophetnet.py/0
{ "file_path": "transformers/tests/models/prophetnet/test_modeling_prophetnet.py", "repo_id": "transformers", "token_count": 25641 }
418
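The ProphetNet integration tests above check the summarization and question-generation checkpoints end to end. A minimal, illustrative sketch of the summarization path follows; the checkpoint name and generation settings are copied from test_cnndm_inference, while the article text is only a stand-in placeholder.

from transformers import ProphetNetForConditionalGeneration, ProphetNetTokenizer

checkpoint = "microsoft/prophetnet-large-uncased-cnndm"
model = ProphetNetForConditionalGeneration.from_pretrained(checkpoint)
tokenizer = ProphetNetTokenizer.from_pretrained(checkpoint)

# The CNN/DailyMail checkpoint is used on lower-cased text, as in the test above.
article = "ustc was founded in beijing by the chinese academy of sciences in 1958 ..."
input_ids = tokenizer([article], max_length=511, return_tensors="pt").input_ids

summary_ids = model.generate(
    input_ids, num_beams=4, length_penalty=1.0, no_repeat_ngram_size=3, early_stopping=True
)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))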
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from transformers.models.qwen2_vl.image_processing_qwen2_vl import smart_resize from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import Qwen2VLImageProcessor class Qwen2VLImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=56, max_resolution=1024, min_pixels=56 * 56, max_pixels=28 * 28 * 1280, do_normalize=True, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD, do_resize=True, patch_size=14, temporal_patch_size=2, merge_size=2, do_convert_rgb=True, ): self.parent = parent self.batch_size = batch_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.num_channels = num_channels self.image_mean = OPENAI_CLIP_MEAN self.image_std = OPENAI_CLIP_STD self.min_pixels = min_pixels self.max_pixels = max_pixels self.patch_size = patch_size self.temporal_patch_size = temporal_patch_size self.merge_size = merge_size self.do_resize = do_resize self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "image_mean": self.image_mean, "image_std": self.image_std, "min_pixels": self.min_pixels, "max_pixels": self.max_pixels, "patch_size": self.patch_size, "temporal_patch_size": self.temporal_patch_size, "merge_size": self.merge_size, } def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): images = prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) return [[image] for image in images] @require_torch @require_vision class Qwen2VLImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = Qwen2VLImageProcessor if is_vision_available() else None def setUp(self): super().setUp() self.image_processor_tester = Qwen2VLImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "min_pixels")) 
self.assertTrue(hasattr(image_processing, "max_pixels")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) self.assertTrue(hasattr(image_processing, "patch_size")) self.assertTrue(hasattr(image_processing, "temporal_patch_size")) self.assertTrue(hasattr(image_processing, "merge_size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.min_pixels, 56 * 56) self.assertEqual(image_processor.max_pixels, 28 * 28 * 1280) image_processor = self.image_processing_class.from_dict( self.image_processor_dict, min_pixels=256 * 256, max_pixels=640 * 640 ) self.assertEqual(image_processor.min_pixels, 256 * 256) self.assertEqual(image_processor.max_pixels, 640 * 640) def test_select_best_resolution(self): # Test with a final resize resolution best_resolution = smart_resize(561, 278, factor=28) self.assertEqual(best_resolution, (560, 280)) def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) for image in image_inputs: self.assertIsInstance(image[0], Image.Image) # Test not batched input prcocess_out = image_processing(image_inputs[0], return_tensors="pt") encoded_images = prcocess_out.pixel_values image_grid_thws = prcocess_out.image_grid_thw expected_output_image_shape = (4900, 1176) expected_image_grid_thws = torch.Tensor([[1, 70, 70]]) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) # Test batched prcocess_out = image_processing(image_inputs, return_tensors="pt") encoded_images = prcocess_out.pixel_values image_grid_thws = prcocess_out.image_grid_thw expected_output_image_shape = (34300, 1176) expected_image_grid_thws = torch.Tensor([[1, 70, 70]] * 7) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True) for image in image_inputs: self.assertIsInstance(image[0], np.ndarray) # Test not batched input prcocess_out = image_processing(image_inputs[0], return_tensors="pt") encoded_images = prcocess_out.pixel_values image_grid_thws = prcocess_out.image_grid_thw expected_output_image_shape = (4900, 1176) expected_image_grid_thws = torch.Tensor([[1, 70, 70]]) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) # Test batched prcocess_out = image_processing(image_inputs, return_tensors="pt") encoded_images = prcocess_out.pixel_values image_grid_thws = prcocess_out.image_grid_thw expected_output_image_shape = (34300, 1176) expected_image_grid_thws = torch.Tensor([[1, 70, 70]] * 7) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = 
self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True) for image in image_inputs: self.assertIsInstance(image[0], torch.Tensor) # Test not batched input prcocess_out = image_processing(image_inputs[0], return_tensors="pt") encoded_images = prcocess_out.pixel_values image_grid_thws = prcocess_out.image_grid_thw expected_output_image_shape = (4900, 1176) expected_image_grid_thws = torch.Tensor([[1, 70, 70]]) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) # Test batched prcocess_out = image_processing(image_inputs, return_tensors="pt") encoded_images = prcocess_out.pixel_values image_grid_thws = prcocess_out.image_grid_thw expected_output_image_shape = (34300, 1176) expected_image_grid_thws = torch.Tensor([[1, 70, 70]] * 7) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) @unittest.skip(reason="Qwen2VLImageProcessor doesn't treat 4 channel PIL and numpy consistently yet") def test_call_numpy_4_channels(self): pass def test_nested_input(self): image_processing = self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) # Test batched as a list of images prcocess_out = image_processing(image_inputs, return_tensors="pt") encoded_images = prcocess_out.pixel_values image_grid_thws = prcocess_out.image_grid_thw expected_output_image_shape = (34300, 1176) expected_image_grid_thws = torch.Tensor([[1, 70, 70]] * 7) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) # Test batched as a nested list of images, where each sublist is one batch image_inputs_nested = image_inputs[:3] + image_inputs[3:] prcocess_out = image_processing(image_inputs_nested, return_tensors="pt") encoded_images_nested = prcocess_out.pixel_values image_grid_thws_nested = prcocess_out.image_grid_thw expected_output_image_shape = (34300, 1176) expected_image_grid_thws = torch.Tensor([[1, 70, 70]] * 7) self.assertEqual(tuple(encoded_images_nested.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) # Image processor should return same pixel values, independently of ipnut format self.assertTrue((encoded_images_nested == encoded_images).all()) self.assertTrue((image_grid_thws_nested == expected_image_grid_thws).all())
transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py/0
{ "file_path": "transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py", "repo_id": "transformers", "token_count": 4620 }
419
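For reference, a minimal usage sketch of the image processor exercised by the record above (not part of the test file itself). The 980x980 input size, the explicit patch_size=14 / temporal_patch_size=2 / merge_size=2 settings, and the resulting 70x70 grid are assumptions chosen to reproduce the shapes the tests assert; smart_resize simply snaps a resolution to multiples of the patch-merge factor within the pixel budget.

import numpy as np
from PIL import Image
from transformers import Qwen2VLImageProcessor
from transformers.models.qwen2_vl.image_processing_qwen2_vl import smart_resize

# smart_resize rounds (height, width) to multiples of `factor` while keeping the area
# inside [min_pixels, max_pixels]; this is the resolution the processor actually uses.
print(smart_resize(561, 278, factor=28))  # (560, 280)

processor = Qwen2VLImageProcessor(patch_size=14, temporal_patch_size=2, merge_size=2)

# A 980x980 image is already a multiple of patch_size * merge_size = 28, so it is kept as is.
image = Image.fromarray((np.random.rand(980, 980, 3) * 255).astype(np.uint8))
out = processor(images=[image], return_tensors="pt")

# pixel_values is flattened to (num_patches, patch_dim) where
# patch_dim = channels * temporal_patch_size * patch_size**2 = 3 * 2 * 14 * 14 = 1176
# and num_patches = 1 * (980 / 14) * (980 / 14) = 4900.
print(out.pixel_values.shape)  # torch.Size([4900, 1176])
print(out.image_grid_thw)      # tensor([[ 1, 70, 70]]) -> (temporal, height, width) patch grid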
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch SAM model.""" import gc import unittest import requests from transformers import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig, pipeline from transformers.testing_utils import backend_empty_cache, require_torch, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SamModel, SamProcessor if is_vision_available(): from PIL import Image class SamPromptEncoderTester: def __init__( self, hidden_size=32, input_image_size=24, patch_size=2, mask_input_channels=4, num_point_embeddings=4, hidden_act="gelu", ): self.hidden_size = hidden_size self.input_image_size = input_image_size self.patch_size = patch_size self.mask_input_channels = mask_input_channels self.num_point_embeddings = num_point_embeddings self.hidden_act = hidden_act def get_config(self): return SamPromptEncoderConfig( image_size=self.input_image_size, patch_size=self.patch_size, mask_input_channels=self.mask_input_channels, hidden_size=self.hidden_size, num_point_embeddings=self.num_point_embeddings, hidden_act=self.hidden_act, ) def prepare_config_and_inputs(self): dummy_points = floats_tensor([self.batch_size, 3, 2]) config = self.get_config() return config, dummy_points class SamMaskDecoderTester: def __init__( self, hidden_size=32, hidden_act="relu", mlp_dim=64, num_hidden_layers=2, num_attention_heads=4, attention_downsample_rate=2, num_multimask_outputs=3, iou_head_depth=3, iou_head_hidden_dim=32, layer_norm_eps=1e-6, ): self.hidden_size = hidden_size self.hidden_act = hidden_act self.mlp_dim = mlp_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.attention_downsample_rate = attention_downsample_rate self.num_multimask_outputs = num_multimask_outputs self.iou_head_depth = iou_head_depth self.iou_head_hidden_dim = iou_head_hidden_dim self.layer_norm_eps = layer_norm_eps def get_config(self): return SamMaskDecoderConfig( hidden_size=self.hidden_size, hidden_act=self.hidden_act, mlp_dim=self.mlp_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, attention_downsample_rate=self.attention_downsample_rate, num_multimask_outputs=self.num_multimask_outputs, iou_head_depth=self.iou_head_depth, iou_head_hidden_dim=self.iou_head_hidden_dim, layer_norm_eps=self.layer_norm_eps, ) def prepare_config_and_inputs(self): config = self.get_config() dummy_inputs = { "image_embedding": floats_tensor([self.batch_size, self.hidden_size]), } return config, dummy_inputs class SamModelTester: def __init__( self, parent, hidden_size=36, intermediate_size=72, projection_dim=62, output_channels=32, 
num_hidden_layers=2, num_attention_heads=4, num_channels=3, image_size=24, patch_size=2, hidden_act="gelu", layer_norm_eps=1e-06, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, qkv_bias=True, mlp_ratio=4.0, use_abs_pos=True, use_rel_pos=True, rel_pos_zero_init=False, window_size=14, global_attn_indexes=[2, 5, 8, 11], num_pos_feats=16, mlp_dim=None, batch_size=2, ): self.parent = parent self.image_size = image_size self.patch_size = patch_size self.output_channels = output_channels self.num_channels = num_channels self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.mlp_ratio = mlp_ratio self.use_abs_pos = use_abs_pos self.use_rel_pos = use_rel_pos self.rel_pos_zero_init = rel_pos_zero_init self.window_size = window_size self.global_attn_indexes = global_attn_indexes self.num_pos_feats = num_pos_feats self.mlp_dim = mlp_dim self.batch_size = batch_size # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 self.prompt_encoder_tester = SamPromptEncoderTester() self.mask_decoder_tester = SamMaskDecoderTester() def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): vision_config = SamVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, initializer_factor=self.initializer_factor, output_channels=self.output_channels, qkv_bias=self.qkv_bias, mlp_ratio=self.mlp_ratio, use_abs_pos=self.use_abs_pos, use_rel_pos=self.use_rel_pos, rel_pos_zero_init=self.rel_pos_zero_init, window_size=self.window_size, global_attn_indexes=self.global_attn_indexes, num_pos_feats=self.num_pos_feats, mlp_dim=self.mlp_dim, ) prompt_encoder_config = self.prompt_encoder_tester.get_config() mask_decoder_config = self.mask_decoder_tester.get_config() return SamConfig( vision_config=vision_config, prompt_encoder_config=prompt_encoder_config, mask_decoder_config=mask_decoder_config, ) def create_and_check_model(self, config, pixel_values): model = SamModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) self.parent.assertEqual(result.iou_scores.shape, (self.batch_size, 1, 3)) self.parent.assertEqual(result.pred_masks.shape[:3], (self.batch_size, 1, 3)) def create_and_check_get_image_features(self, config, pixel_values): model = SamModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model.get_image_embeddings(pixel_values) self.parent.assertEqual(result[0].shape, (self.output_channels, 12, 12)) def create_and_check_get_image_hidden_states(self, config, pixel_values): model = SamModel(config=config) 
model.to(torch_device) model.eval() with torch.no_grad(): result = model.vision_encoder( pixel_values, output_hidden_states=True, return_dict=True, ) # after computing the convolutional features expected_hidden_states_shape = (self.batch_size, 12, 12, 36) self.parent.assertEqual(len(result[1]), self.num_hidden_layers + 1) self.parent.assertEqual(result[1][0].shape, expected_hidden_states_shape) with torch.no_grad(): result = model.vision_encoder( pixel_values, output_hidden_states=True, return_dict=False, ) # after computing the convolutional features expected_hidden_states_shape = (self.batch_size, 12, 12, 36) self.parent.assertEqual(len(result[1]), self.num_hidden_layers + 1) self.parent.assertEqual(result[1][0].shape, expected_hidden_states_shape) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SamModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as SAM's vision encoder does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (SamModel,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": SamModel, "mask-generation": SamModel} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_torchscript = False # TODO: Fix me @Arthur: `run_batch_test` in `tests/test_pipeline_mixin.py` not working def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def setUp(self): self.model_tester = SamModelTester(self) self.vision_config_tester = ConfigTester(self, config_class=SamVisionConfig, has_text_modality=False) self.prompt_encoder_config_tester = ConfigTester( self, config_class=SamPromptEncoderConfig, has_text_modality=False, num_attention_heads=12, num_hidden_layers=2, ) self.mask_decoder_config_tester = ConfigTester( self, config_class=SamMaskDecoderConfig, has_text_modality=False ) def test_config(self): self.vision_config_tester.run_common_tests() self.prompt_encoder_config_tester.run_common_tests() self.mask_decoder_config_tester.run_common_tests() @unittest.skip(reason="SAM's vision encoder does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_get_image_features(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_get_image_features(*config_and_inputs) def test_image_hidden_states(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_get_image_hidden_states(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True expected_vision_attention_shape = ( self.model_tester.batch_size * 
self.model_tester.num_attention_heads, 196, 196, ) expected_mask_decoder_attention_shape = (self.model_tester.batch_size, 1, 144, 32) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) vision_attentions = outputs.vision_attentions self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers) mask_decoder_attentions = outputs.mask_decoder_attentions self.assertEqual(len(mask_decoder_attentions), self.model_tester.mask_decoder_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) vision_attentions = outputs.vision_attentions self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers) mask_decoder_attentions = outputs.mask_decoder_attentions self.assertEqual(len(mask_decoder_attentions), self.model_tester.mask_decoder_tester.num_hidden_layers) self.assertListEqual( list(vision_attentions[0].shape[-4:]), list(expected_vision_attention_shape), ) self.assertListEqual( list(mask_decoder_attentions[0].shape[-4:]), list(expected_mask_decoder_attention_shape), ) @unittest.skip(reason="SamModel does not support training") def test_training(self): pass @unittest.skip(reason="SamModel does not support training") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="SamModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="SamModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="SamModel does not support training") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Hidden_states is tested in create_and_check_model tests") def test_hidden_states_output(self): pass def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None): # Use a slightly higher default tol to make the tests non-flaky super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol=tol, name=name, attributes=attributes) @slow def test_model_from_pretrained(self): model_name = "facebook/sam-vit-huge" model = SamModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_image(): img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image def prepare_dog_img(): img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/dog-sam.png" raw_image = Image.open(requests.get(img_url, 
stream=True).raw).convert("RGB") return raw_image @slow class SamModelIntegrationTest(unittest.TestCase): def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() backend_empty_cache(torch_device) def test_inference_mask_generation_no_point(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() inputs = processor(images=raw_image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() masks = outputs.pred_masks[0, 0, 0, 0, :3] self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.4515), atol=2e-4)) self.assertTrue(torch.allclose(masks, torch.tensor([-4.1800, -3.4948, -3.4481]).to(torch_device), atol=2e-4)) def test_inference_mask_generation_one_point_one_bb(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_boxes = [[[650, 900, 1000, 1250]]] input_points = [[[820, 1080]]] inputs = processor( images=raw_image, input_boxes=input_boxes, input_points=input_points, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() masks = outputs.pred_masks[0, 0, 0, 0, :3] self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9566), atol=2e-4)) self.assertTrue( torch.allclose(masks, torch.tensor([-12.7729, -12.3665, -12.6061]).to(torch_device), atol=2e-4) ) def test_inference_mask_generation_batched_points_batched_images(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_points = [ [[[820, 1080]], [[820, 1080]], [[820, 1080]], [[820, 1080]]], [[[510, 1080]], [[820, 1080]], [[820, 1080]], [[820, 1080]]], ] inputs = processor(images=[raw_image, raw_image], input_points=input_points, return_tensors="pt").to( torch_device ) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze().cpu() masks = outputs.pred_masks[0, 0, 0, 0, :3].cpu() EXPECTED_SCORES = torch.tensor( [ [ [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], ], [ [0.3317, 0.7264, 0.7646], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], ], ] ) EXPECTED_MASKS = torch.tensor([-2.8550, -2.7988, -2.9625]) self.assertTrue(torch.allclose(scores, EXPECTED_SCORES, atol=1e-3)) self.assertTrue(torch.allclose(masks, EXPECTED_MASKS, atol=1e-3)) def test_inference_mask_generation_one_point_one_bb_zero(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_boxes = [[[620, 900, 1000, 1255]]] input_points = [[[820, 1080]]] labels = [[0]] inputs = processor( images=raw_image, input_boxes=input_boxes, input_points=input_points, input_labels=labels, return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.7894), atol=1e-4)) def test_inference_mask_generation_one_point(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = 
SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_points = [[[400, 650]]] input_labels = [[1]] inputs = processor( images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9675), atol=1e-4)) # With no label input_points = [[[400, 650]]] inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9675), atol=1e-4)) def test_inference_mask_generation_two_points(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_points = [[[400, 650], [800, 650]]] input_labels = [[1, 1]] inputs = processor( images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9762), atol=1e-4)) # no labels inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9762), atol=1e-4)) def test_inference_mask_generation_two_points_batched(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_points = [[[400, 650], [800, 650]], [[400, 650]]] input_labels = [[1, 1], [1]] inputs = processor( images=[raw_image, raw_image], input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[0][-1], torch.tensor(0.9762), atol=1e-4)) self.assertTrue(torch.allclose(scores[1][-1], torch.tensor(0.9637), atol=1e-4)) def test_inference_mask_generation_one_box(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_boxes = [[[75, 275, 1725, 850]]] inputs = processor(images=raw_image, input_boxes=input_boxes, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.7937), atol=1e-4)) def test_inference_mask_generation_batched_image_one_point(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() raw_dog_image = prepare_dog_img() input_points = [[[820, 1080]], [[220, 470]]] inputs = processor(images=[raw_image, raw_dog_image], input_points=input_points, return_tensors="pt").to( torch_device ) with torch.no_grad(): outputs = model(**inputs) scores_batched = outputs.iou_scores.squeeze() input_points = [[[220, 470]]] inputs = processor(images=raw_dog_image, 
input_points=input_points, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores_single = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores_batched[1, :], scores_single, atol=1e-4)) def test_inference_mask_generation_two_points_point_batch(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_points = torch.Tensor([[[400, 650]], [[220, 470]]]).cpu() # fmt: skip input_points = input_points.unsqueeze(0) inputs = processor(raw_image, input_points=input_points, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) iou_scores = outputs.iou_scores.cpu() self.assertTrue(iou_scores.shape == (1, 2, 3)) torch.testing.assert_close( iou_scores, torch.tensor([[[0.9105, 0.9825, 0.9675], [0.7646, 0.7943, 0.7774]]]), atol=1e-4, rtol=1e-4 ) def test_inference_mask_generation_three_boxes_point_batch(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() # fmt: off input_boxes = torch.Tensor([[[620, 900, 1000, 1255]], [[75, 275, 1725, 850]], [[75, 275, 1725, 850]]]).cpu() EXPECTED_IOU = torch.tensor([[[0.9773, 0.9881, 0.9522], [0.5996, 0.7661, 0.7937], [0.5996, 0.7661, 0.7937]]]) # fmt: on input_boxes = input_boxes.unsqueeze(0) inputs = processor(raw_image, input_boxes=input_boxes, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) iou_scores = outputs.iou_scores.cpu() self.assertTrue(iou_scores.shape == (1, 3, 3)) torch.testing.assert_close(iou_scores, EXPECTED_IOU, atol=1e-4, rtol=1e-4) def test_dummy_pipeline_generation(self): generator = pipeline("mask-generation", model="facebook/sam-vit-base", device=torch_device) raw_image = prepare_image() _ = generator(raw_image, points_per_batch=64)
transformers/tests/models/sam/test_modeling_sam.py/0
{ "file_path": "transformers/tests/models/sam/test_modeling_sam.py", "repo_id": "transformers", "token_count": 12984 }
420
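For reference, a minimal sketch of the point-prompt inference flow that the SAM integration tests above exercise. It assumes network access and the facebook/sam-vit-base checkpoint; the point coordinates, image URL, and expected IoU are taken from the tests themselves.

import requests
import torch
from PIL import Image
from transformers import SamModel, SamProcessor

model = SamModel.from_pretrained("facebook/sam-vit-base").eval()
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")

img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

# One foreground click per image: nesting is [images][point batches][points][x, y],
# with label 1 marking a foreground point.
input_points = [[[400, 650]]]
input_labels = [[1]]
inputs = processor(
    images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt"
)

with torch.no_grad():
    outputs = model(**inputs)

# Three candidate masks are predicted per prompt, ranked by estimated IoU
# (the test above expects ~0.9675 for the best mask of this point).
print(outputs.iou_scores.shape)  # torch.Size([1, 1, 3])
print(outputs.pred_masks.shape)  # (batch, point_batch, 3, H, W) low-resolution mask logits
# Full-resolution binary masks can then be recovered with
# processor.image_processor.post_process_masks(outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]).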
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch SegGpt model.""" import inspect import math import unittest from datasets import load_dataset from transformers import SegGptConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SegGptForImageSegmentation, SegGptModel from transformers.models.seggpt.modeling_seggpt import SegGptLoss if is_vision_available(): from transformers import SegGptImageProcessor class SegGptModelTester: def __init__( self, parent, batch_size=2, image_size=30, patch_size=2, num_channels=3, is_training=False, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, mlp_ratio=2.0, merge_index=0, intermediate_hidden_state_indices=[1], pretrain_image_size=10, decoder_hidden_size=10, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.mlp_ratio = mlp_ratio self.merge_index = merge_index self.intermediate_hidden_state_indices = intermediate_hidden_state_indices self.pretrain_image_size = pretrain_image_size self.decoder_hidden_size = decoder_hidden_size # in SegGpt, the seq length equals the number of patches (we don't use the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size // 2, self.image_size]) prompt_pixel_values = floats_tensor( [self.batch_size, self.num_channels, self.image_size // 2, self.image_size] ) prompt_masks = floats_tensor([self.batch_size, self.num_channels, self.image_size // 2, self.image_size]) labels = None if self.use_labels: labels = floats_tensor([self.batch_size, self.num_channels, self.image_size // 2, self.image_size]) config = self.get_config() return config, pixel_values, prompt_pixel_values, prompt_masks, labels def get_config(self): return SegGptConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, 
num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, initializer_range=self.initializer_range, mlp_ratio=self.mlp_ratio, merge_index=self.merge_index, intermediate_hidden_state_indices=self.intermediate_hidden_state_indices, pretrain_image_size=self.pretrain_image_size, decoder_hidden_size=self.decoder_hidden_size, ) def create_and_check_model(self, config, pixel_values, prompt_pixel_values, prompt_masks, labels): model = SegGptModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values, prompt_pixel_values, prompt_masks) self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, self.image_size // self.patch_size, self.image_size // self.patch_size, self.hidden_size, ), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, prompt_pixel_values, prompt_masks, labels, ) = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "prompt_pixel_values": prompt_pixel_values, "prompt_masks": prompt_masks, } return config, inputs_dict @require_torch class SegGptModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as SegGpt does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (SegGptModel, SegGptForImageSegmentation) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_torchscript = False pipeline_model_mapping = ( {"feature-extraction": SegGptModel, "mask-generation": SegGptModel} if is_torch_available() else {} ) def setUp(self): self.model_tester = SegGptModelTester(self) self.config_tester = ConfigTester(self, config_class=SegGptConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="SegGpt does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values", "prompt_pixel_values", "prompt_masks"] self.assertListEqual(arg_names[:3], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) patch_height = patch_width = config.image_size // config.patch_size self.assertListEqual( 
list(hidden_states[0].shape[-3:]), [patch_height, patch_width, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_batching_equivalence(self): def recursive_check(batched_object, single_row_object, model_name, key): if isinstance(batched_object, (list, tuple)): for batched_object_value, single_row_object_value in zip(batched_object, single_row_object): recursive_check(batched_object_value, single_row_object_value, model_name, key) else: batched_row = batched_object[:1] self.assertFalse( torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}" ) self.assertFalse( torch.isnan(single_row_object).any(), f"Single row output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(single_row_object).any(), f"Single row output has `inf` in {model_name} for key={key}" ) self.assertTrue( torch.max(torch.abs(batched_row - single_row_object)) <= 1e-03, msg=( f"Batched and Single row outputs are not equal in {model_name} for key={key}. " f"Difference={torch.max(torch.abs(batched_row - single_row_object))}." ), ) config, batched_input = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: config.output_hidden_states = True model_name = model_class.__name__ batched_input_prepared = self._prepare_for_class(batched_input, model_class) model = model_class(config).to(torch_device).eval() batch_size = self.model_tester.batch_size single_row_input = {} for key, value in batched_input_prepared.items(): if isinstance(value, torch.Tensor) and value.shape[0] % batch_size == 0: single_batch_shape = value.shape[0] // batch_size single_row_input[key] = value[:single_batch_shape] with torch.no_grad(): model_batched_output = model(**batched_input_prepared) model_row_output = model(**single_row_input) for key in model_batched_output: # the first hidden state in SegGPT has weird hack of adding first half of batch with second half if key == "hidden_states": model_batched_output[key] = model_batched_output[key][1:] model_row_output[key] = model_row_output[key][1:] recursive_check(model_batched_output[key], model_row_output[key], model_name, key) def test_seggpt_loss(self): torch.manual_seed(100) config = self.model_tester.get_config() prompt_masks = torch.rand(1, config.num_channels, config.image_size, config.image_size) label = torch.rand(1, config.num_channels, config.image_size, config.image_size) pred_masks = torch.rand(1, config.num_channels, config.image_size * 2, config.image_size) # seq_len x 2 because the loss concatenates prompt_masks and labels as pred_masks is concatenated bool_masked_pos = torch.rand(1, self.model_tester.seq_length * 2) > 0.5 loss = SegGptLoss(config) loss_value = loss(prompt_masks, pred_masks, label, bool_masked_pos) expected_loss_value = torch.tensor(0.3340) self.assertTrue(torch.allclose(loss_value, expected_loss_value, atol=1e-4)) @slow def test_model_from_pretrained(self): model_name = "BAAI/seggpt-vit-large" model = SegGptModel.from_pretrained(model_name) self.assertIsNotNone(model) 
def prepare_img(): ds = load_dataset("EduardoPacheco/seggpt-example-data")["train"] images = [image.convert("RGB") for image in ds["image"]] masks = [image.convert("RGB") for image in ds["mask"]] return images, masks def prepare_bool_masked_pos(config: SegGptConfig): num_patches = math.prod([i // config.patch_size for i in config.image_size]) mask_ratio = 0.75 torch.manual_seed(2) num_masked_patches = int(num_patches * mask_ratio) shuffle_idx = torch.randperm(num_patches) bool_masked_pos = torch.FloatTensor([0] * (num_patches - num_masked_patches) + [1] * num_masked_patches)[ shuffle_idx ] bool_masked_pos = bool_masked_pos.unsqueeze(0).bool() return bool_masked_pos @require_torch @require_vision class SegGptModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return SegGptImageProcessor.from_pretrained("BAAI/seggpt-vit-large") if is_vision_available() else None @slow def test_one_shot_inference(self): model = SegGptForImageSegmentation.from_pretrained("BAAI/seggpt-vit-large").to(torch_device) image_processor = self.default_image_processor images, masks = prepare_img() input_image = images[1] prompt_image = images[0] prompt_mask = masks[0] inputs = image_processor( images=input_image, prompt_images=prompt_image, prompt_masks=prompt_mask, return_tensors="pt", do_convert_rgb=False, ) inputs = inputs.to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 3, 896, 448)) self.assertEqual(outputs.pred_masks.shape, expected_shape) expected_slice = torch.tensor( [ [[-2.1208, -2.1190, -2.1198], [-2.1237, -2.1228, -2.1227], [-2.1232, -2.1226, -2.1228]], [[-2.0405, -2.0396, -2.0403], [-2.0434, -2.0434, -2.0433], [-2.0428, -2.0432, -2.0434]], [[-1.8102, -1.8088, -1.8099], [-1.8131, -1.8126, -1.8129], [-1.8130, -1.8128, -1.8131]], ] ).to(torch_device) self.assertTrue(torch.allclose(outputs.pred_masks[0, :, :3, :3], expected_slice, atol=1e-4)) result = image_processor.post_process_semantic_segmentation(outputs, [input_image.size[::-1]])[0] result_expected_shape = torch.Size((170, 297)) expected_area = 1082 area = (result > 0).sum().item() self.assertEqual(result.shape, result_expected_shape) self.assertEqual(area, expected_area) @slow def test_few_shot_inference(self): model = SegGptForImageSegmentation.from_pretrained("BAAI/seggpt-vit-large").to(torch_device) image_processor = self.default_image_processor images, masks = prepare_img() input_images = [images[1]] * 2 prompt_images = [images[0], images[2]] prompt_masks = [masks[0], masks[2]] inputs = image_processor( images=input_images, prompt_images=prompt_images, prompt_masks=prompt_masks, return_tensors="pt", do_convert_rgb=False, ) inputs = {k: v.to(torch_device) for k, v in inputs.items()} with torch.no_grad(): outputs = model(**inputs, feature_ensemble=True) expected_shape = torch.Size((2, 3, 896, 448)) expected_slice = torch.tensor( [ [[-2.1201, -2.1192, -2.1189], [-2.1217, -2.1210, -2.1204], [-2.1216, -2.1202, -2.1194]], [[-2.0393, -2.0390, -2.0387], [-2.0402, -2.0402, -2.0397], [-2.0400, -2.0394, -2.0388]], [[-1.8083, -1.8076, -1.8077], [-1.8105, -1.8102, -1.8099], [-1.8105, -1.8095, -1.8090]], ] ).to(torch_device) self.assertEqual(outputs.pred_masks.shape, expected_shape) self.assertTrue(torch.allclose(outputs.pred_masks[0, :, 448:451, :3], expected_slice, atol=4e-4)) @slow def test_one_shot_with_label(self): model = SegGptForImageSegmentation.from_pretrained("BAAI/seggpt-vit-large").to(torch_device) image_processor = 
self.default_image_processor images, masks = prepare_img() input_image = images[1] label = masks[1] prompt_image = images[0] prompt_mask = masks[0] inputs = image_processor( images=input_image, prompt_masks=prompt_mask, prompt_images=prompt_image, return_tensors="pt", do_convert_rgb=False, ).to(torch_device) labels = image_processor(images=None, prompt_masks=label, return_tensors="pt", do_convert_rgb=False)[ "prompt_masks" ].to(torch_device) bool_masked_pos = prepare_bool_masked_pos(model.config).to(torch_device) with torch.no_grad(): outputs = model(**inputs, labels=labels, bool_masked_pos=bool_masked_pos) expected_loss = torch.tensor(0.0074).to(torch_device) self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
transformers/tests/models/seggpt/test_modeling_seggpt.py/0
{ "file_path": "transformers/tests/models/seggpt/test_modeling_seggpt.py", "repo_id": "transformers", "token_count": 8356 }
421
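For reference, a minimal sketch of SegGPT one-shot inference matching the integration test above. It assumes network access, the BAAI/seggpt-vit-large checkpoint, and the example dataset the test downloads.

import torch
from datasets import load_dataset
from transformers import SegGptForImageSegmentation, SegGptImageProcessor

ds = load_dataset("EduardoPacheco/seggpt-example-data")["train"]
images = [img.convert("RGB") for img in ds["image"]]
masks = [img.convert("RGB") for img in ds["mask"]]

processor = SegGptImageProcessor.from_pretrained("BAAI/seggpt-vit-large")
model = SegGptForImageSegmentation.from_pretrained("BAAI/seggpt-vit-large").eval()

# The prompt image/mask pair defines what to segment; the model transfers it to the input image.
inputs = processor(
    images=images[1],
    prompt_images=images[0],
    prompt_masks=masks[0],
    return_tensors="pt",
    do_convert_rgb=False,
)
with torch.no_grad():
    outputs = model(**inputs)

# pred_masks come out at the model's concatenated resolution (e.g. (1, 3, 896, 448));
# post-processing resizes them back to the input image, whose PIL size is (w, h), hence [::-1].
segmentation = processor.post_process_semantic_segmentation(outputs, [images[1].size[::-1]])[0]
print(outputs.pred_masks.shape, segmentation.shape)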
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import unittest from transformers import AutoBackbone from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class TimmBackboneModelTester: def __init__( self, parent, out_indices=None, out_features=None, stage_names=None, backbone="resnet18", batch_size=3, image_size=32, num_channels=3, is_training=True, use_pretrained_backbone=True, ): self.parent = parent self.out_indices = out_indices if out_indices is not None else [4] self.stage_names = stage_names self.out_features = out_features self.backbone = backbone self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.use_pretrained_backbone = use_pretrained_backbone self.is_training = is_training def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return TimmBackboneConfig( image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, ) def create_and_check_model(self, config, pixel_values): model = TimmBackbone(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) self.parent.assertEqual( result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch @require_timm class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TimmBackbone,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {} test_resize_embeddings = False test_head_masking = False test_pruning = False has_attentions = False def setUp(self): # self.config_class = PretrainedConfig self.config_class = TimmBackboneConfig self.model_tester = TimmBackboneModelTester(self) self.config_tester = ConfigTester( self, config_class=self.config_class, has_text_modality=False, common_properties=["num_channels"] ) def test_config(self): self.config_tester.run_common_tests() def test_timm_transformer_backbone_equivalence(self): timm_checkpoint = 
"resnet18" transformers_checkpoint = "microsoft/resnet-18" timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True) transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint) self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features)) self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names)) self.assertEqual(timm_model.channels, transformers_model.channels) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices, [-1]) self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1]) timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3]) transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3]) self.assertEqual(timm_model.out_indices, transformers_model.out_indices) self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features)) self.assertEqual(timm_model.channels, transformers_model.channels) @unittest.skip(reason="TimmBackbone doesn't support feed forward chunking") def test_feed_forward_chunking(self): pass @unittest.skip(reason="TimmBackbone doesn't have num_hidden_layers attribute") def test_hidden_states_output(self): pass @unittest.skip(reason="TimmBackbone initialization is managed on the timm side") def test_initialization(self): pass @unittest.skip(reason="TimmBackbone models doesn't have inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="TimmBackbone models doesn't have inputs_embeds") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="TimmBackbone model cannot be created without specifying a backbone checkpoint") def test_from_pretrained_no_checkpoint(self): pass @unittest.skip(reason="Only checkpoints on timm can be loaded into TimmBackbone") def test_save_load(self): pass @unittest.skip(reason="No support for low_cpu_mem_usage=True.") def test_save_load_low_cpu_mem_usage(self): pass @unittest.skip(reason="No support for low_cpu_mem_usage=True.") def test_save_load_low_cpu_mem_usage_checkpoints(self): pass @unittest.skip(reason="No support for low_cpu_mem_usage=True.") def test_save_load_low_cpu_mem_usage_no_safetensors(self): pass @unittest.skip(reason="model weights aren't tied in TimmBackbone.") def test_tie_model_weights(self): pass @unittest.skip(reason="model weights aren't tied in TimmBackbone.") def test_tied_model_weights_key_ignore(self): pass @unittest.skip(reason="Only checkpoints on timm can be loaded into TimmBackbone") def test_load_save_without_tied_weights(self): pass @unittest.skip(reason="Only checkpoints on timm can be loaded into TimmBackbone") def test_model_weights_reload_no_missing_tied_weights(self): pass @unittest.skip(reason="TimmBackbone doesn't have hidden size info in its configuration.") def test_channels(self): pass @unittest.skip(reason="TimmBackbone doesn't support output_attentions.") def test_torchscript_output_attentions(self): pass @unittest.skip(reason="Safetensors is not supported by timm.") def test_can_use_safetensors(self): pass @unittest.skip(reason="Need to use a timm backbone and there is no tiny model available.") def test_model_is_small(self): pass def test_forward_signature(self): config, _ = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0][-1] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: attentions = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) if self.has_attentions: self.assertIsNotNone(attentions.grad) # TimmBackbone config doesn't have out_features attribute def test_create_from_modified_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() result = model(**inputs_dict) self.assertEqual(len(result.feature_maps), len(config.out_indices)) self.assertEqual(len(model.channels), len(config.out_indices)) # Check output of last stage is taken if out_features=None, out_indices=None modified_config = copy.deepcopy(config) modified_config.out_indices = None model = model_class(modified_config) model.to(torch_device) model.eval() result = model(**inputs_dict) self.assertEqual(len(result.feature_maps), 1) self.assertEqual(len(model.channels), 1) # Check backbone can be initialized with fresh weights modified_config = copy.deepcopy(config) modified_config.use_pretrained_backbone = False model = model_class(modified_config) model.to(torch_device) model.eval() result = model(**inputs_dict)
transformers/tests/models/timm_backbone/test_modeling_timm_backbone.py/0
{ "file_path": "transformers/tests/models/timm_backbone/test_modeling_timm_backbone.py", "repo_id": "transformers", "token_count": 4390 }
422
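For reference, a minimal sketch of the timm-vs-transformers backbone equivalence that the test above asserts. It assumes timm is installed and both checkpoints are reachable; the 224x224 dummy input is an arbitrary illustrative choice.

import torch
from transformers import AutoBackbone

# The same architecture loaded through two different backends.
timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])

pixel_values = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    timm_features = timm_backbone(pixel_values).feature_maps
    hf_features = hf_backbone(pixel_values).feature_maps

# Both expose the same selected stages with matching channel counts, which is what the
# equivalence test checks (the weights themselves still come from different sources).
print(timm_backbone.channels, hf_backbone.channels)
print([tuple(f.shape) for f in timm_features])
print([tuple(f.shape) for f in hf_features])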
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from transformers import AutoProcessor @require_vision class LlavaProcessorTest(unittest.TestCase): def test_chat_template(self): processor = AutoProcessor.from_pretrained("llava-hf/vip-llava-7b-hf") expected_prompt = "###Human: <image>\nWhat is shown in this image?###Assistant:" messages = [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "What is shown in this image?"}, ], }, ] formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True) self.assertEqual(expected_prompt, formatted_prompt)
transformers/tests/models/vipllava/test_processor_vipllava.py/0
{ "file_path": "transformers/tests/models/vipllava/test_processor_vipllava.py", "repo_id": "transformers", "token_count": 535 }
423
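For reference, a short sketch of the chat-template call the test above verifies and of how the resulting prompt is typically consumed afterwards; the trailing processor call and the pil_image name are illustrative assumptions, not part of the test.

from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("llava-hf/vip-llava-7b-hf")

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "What is shown in this image?"},
        ],
    },
]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
print(prompt)  # ###Human: <image>\nWhat is shown in this image?###Assistant:

# The formatted prompt is then paired with a PIL image to build model inputs, e.g.
# inputs = processor(images=pil_image, text=prompt, return_tensors="pt")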
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch ViViT model.""" import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VivitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VivitForVideoClassification, VivitModel if is_vision_available(): from transformers import VivitImageProcessor class VivitModelTester: def __init__( self, parent, batch_size=2, is_training=True, use_labels=True, num_labels=10, image_size=10, num_frames=8, # decreased, because default 32 takes too much RAM at inference tubelet_size=[2, 4, 4], num_channels=3, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu_fast", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, scope=None, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_labels = use_labels self.num_labels = num_labels self.image_size = image_size self.num_frames = num_frames self.tubelet_size = tubelet_size self.num_channels = num_channels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.scope = scope self.seq_length = ( (self.image_size // self.tubelet_size[2]) * (self.image_size // self.tubelet_size[1]) * (self.num_frames // self.tubelet_size[0]) ) + 1 # CLS token def prepare_config_and_inputs(self): pixel_values = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): config = VivitConfig( num_frames=self.num_frames, image_size=self.image_size, tubelet_size=self.tubelet_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, layer_norm_eps=self.layer_norm_eps, qkv_bias=self.qkv_bias, ) config.num_labels = self.num_labels return config def create_and_check_model(self, config, pixel_values, labels): model = VivitModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_video_classification(self, config, pixel_values, labels): model = VivitForVideoClassification(config) model.to(torch_device) model.eval() result = model(pixel_values) # verify the logits shape expected_shape = torch.Size((self.batch_size, self.num_labels)) self.parent.assertEqual(result.logits.shape, expected_shape) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class VivitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Vivit does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (VivitModel, VivitForVideoClassification) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": VivitModel, "video-classification": VivitForVideoClassification} if is_torch_available() else {} ) test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = VivitModelTester(self) self.config_tester = ConfigTester(self, config_class=VivitConfig, has_text_modality=False, hidden_size=37) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING): inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Vivit does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values", "head_mask"] self.assertListEqual(arg_names[:2], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_video_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "google/vivit-b-16x2-kinetics400" model = 
VivitModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: seq_len = self.model_tester.seq_length inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(hidden_states), expected_num_layers) seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # We will verify our results on a video of eating spaghetti # Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227] def prepare_video(): file = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti_32_frames.npy", repo_type="dataset" ) video = np.load(file) return list(video) @require_torch @require_vision class VivitModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return VivitImageProcessor() if is_vision_available() else None @slow def test_inference_for_video_classification(self): model = VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400").to(torch_device) image_processor = self.default_image_processor video = prepare_video() inputs = image_processor(video, 
return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 400)) self.assertEqual(outputs.logits.shape, expected_shape) # taken from original model expected_slice = torch.tensor([-0.9498, 2.7971, -1.4049, 0.1024, -1.8353]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :5], expected_slice, atol=1e-4)) @slow def test_inference_interpolate_pos_encoding(self): # Vivit models have an `interpolate_pos_encoding` argument in their forward method, # allowing to interpolate the pre-trained position embeddings in order to use # the model on higher resolutions. The DINO model by Facebook AI leverages this # to visualize self-attention on higher resolution images. model = VivitModel.from_pretrained("google/vivit-b-16x2").to(torch_device) image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2") video = prepare_video() inputs = image_processor( video, size={"shortest_edge": 480}, crop_size={"height": 480, "width": 480}, return_tensors="pt" ) pixel_values = inputs.pixel_values.to(torch_device) # forward pass with torch.no_grad(): outputs = model(pixel_values, interpolate_pos_encoding=True) # verify the logits shape expected_shape = torch.Size((1, 3137, 768)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
transformers/tests/models/vivit/test_modeling_vivit.py/0
{ "file_path": "transformers/tests/models/vivit/test_modeling_vivit.py", "repo_id": "transformers", "token_count": 6198 }
424
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoFeatureExtractor, AutoProcessor from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput if is_torch_available(): from transformers import Wav2Vec2ForCTC @require_pyctcdecode class Wav2Vec2ProcessorWithLMTest(unittest.TestCase): def setUp(self): vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split() vocab_tokens = dict(zip(vocab, range(len(vocab)))) self.add_kwargs_tokens_map = { "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", } feature_extractor_map = { "feature_size": 1, "padding_value": 0.0, "sampling_rate": 16000, "return_attention_mask": False, "do_normalize": True, } self.tmpdirname = tempfile.mkdtemp() self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.feature_extraction_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(feature_extractor_map) + "\n") # load decoder from hub self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder" def get_tokenizer(self, **kwargs_init): kwargs = self.add_kwargs_tokens_map.copy() kwargs.update(kwargs_init) return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_feature_extractor(self, **kwargs): return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs) def get_decoder(self, **kwargs): return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() feature_extractor = self.get_feature_extractor() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) processor.save_pretrained(self.tmpdirname) processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname) # tokenizer 
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor) # decoder self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, ) self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC) def test_save_load_pretrained_additional_features(self): processor = Wav2Vec2ProcessorWithLM( tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname) # make sure that error is thrown when decoder alphabet doesn't match processor = Wav2Vec2ProcessorWithLM.from_pretrained( self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha, 5.0) self.assertEqual(processor.language_model.beta, 3.0) self.assertEqual(processor.language_model.score_boundary, -7.0) self.assertEqual(processor.language_model.unk_score_offset, 3) def test_load_decoder_tokenizer_mismatch_content(self): tokenizer = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(["xx"]) with self.assertRaisesRegex(ValueError, "include"): Wav2Vec2ProcessorWithLM( tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() ) def test_feature_extractor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(raw_speech, return_tensors="np") input_processor = processor(raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_another_feature_extractor(self): feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/w2v-bert-2.0") tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(raw_speech, return_tensors="np") input_processor = processor(raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", ) def test_wrong_feature_extractor_raises_error(self): feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-large-v3") tokenizer = self.get_tokenizer() decoder = self.get_decoder() with self.assertRaises(ValueError): Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) def test_tokenizer(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) 
input_str = "This is a test string" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def _get_dummy_logits(self, shape=(2, 10, 16), seed=77): np.random.seed(seed) return np.random.rand(*shape) def test_decoder(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) logits = self._get_dummy_logits(shape=(10, 16), seed=13) decoded_processor = processor.decode(logits) decoded_decoder = decoder.decode_beams(logits)[0] self.assertEqual(decoded_decoder[0], decoded_processor.text) self.assertEqual("</s> <s> </s>", decoded_processor.text) self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score) self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score) @parameterized.expand([[None], ["fork"], ["spawn"]]) def test_decoder_batch(self, pool_context): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) logits = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: decoded_processor = processor.batch_decode(logits) else: with get_context(pool_context).Pool() as pool: decoded_processor = processor.batch_decode(logits, pool) logits_list = list(logits) with get_context("fork").Pool() as p: decoded_beams = decoder.decode_beams_batch(p, logits_list) texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0]) logit_scores_decoder.append(beams[0][-2]) lm_scores_decoder.append(beams[0][-1]) self.assertListEqual(texts_decoder, decoded_processor.text) self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text) self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score) self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score) def test_decoder_with_params(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) logits = self._get_dummy_logits() beam_width = 15 beam_prune_logp = -20.0 token_min_logp = -4.0 decoded_processor_out = processor.batch_decode( logits, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp, ) decoded_processor = decoded_processor_out.text logits_list = list(logits) with get_context("fork").Pool() as pool: decoded_decoder_out = decoder.decode_beams_batch( pool, logits_list, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp, ) decoded_decoder = [d[0][0] for d in decoded_decoder_out] logit_scores = [d[0][2] for d in decoded_decoder_out] lm_scores = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(decoded_decoder, decoded_processor) self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor) self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score)) self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, 
atol=1e-3)) self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score)) self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3)) def test_decoder_with_params_of_lm(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) logits = self._get_dummy_logits() alpha = 2.0 beta = 5.0 unk_score_offset = -20.0 lm_score_boundary = True decoded_processor_out = processor.batch_decode( logits, alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary, ) decoded_processor = decoded_processor_out.text logits_list = list(logits) decoder.reset_params( alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary, ) with get_context("fork").Pool() as pool: decoded_decoder_out = decoder.decode_beams_batch( pool, logits_list, ) decoded_decoder = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(decoded_decoder, decoded_processor) self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor) lm_model = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha, 2.0) self.assertEqual(lm_model.beta, 5.0) self.assertEqual(lm_model.unk_score_offset, -20.0) self.assertEqual(lm_model.score_boundary, True) def test_decoder_download_ignores_files(self): processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm") language_model = processor.decoder.model_container[processor.decoder._model_key] path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute() downloaded_decoder_files = os.listdir(path_to_cached_dir) expected_decoder_files = ["alphabet.json", "language_model"] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) 
self.assertListEqual(downloaded_decoder_files, expected_decoder_files) def test_decoder_local_files(self): local_dir = snapshot_download("hf-internal-testing/processor_with_lm") processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir) language_model = processor.decoder.model_container[processor.decoder._model_key] path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute() local_decoder_files = os.listdir(local_dir) expected_decoder_files = os.listdir(path_to_cached_dir) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(local_decoder_files, expected_decoder_files) def test_processor_from_auto_processor(self): processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm") processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm") raw_speech = floats_list((3, 1000)) input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np") input_auto = processor_auto(raw_speech, return_tensors="np") for key in input_wav2vec2.keys(): self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2) logits = self._get_dummy_logits() decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits) decoded_auto = processor_auto.batch_decode(logits) self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text) def test_model_input_names(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", ) @staticmethod def get_from_offsets(offsets, key): retrieved_list = [d[key] for d in offsets] return retrieved_list def test_offsets_integration_fast(self): processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm") logits = self._get_dummy_logits()[0] outputs = processor.decode(logits, output_word_offsets=True) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys()), 4) self.assertTrue("text" in outputs) self.assertTrue("word_offsets" in outputs) self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput)) self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"]) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4]) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5]) def test_offsets_integration_fast_batch(self): processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm") logits = self._get_dummy_logits() outputs = processor.batch_decode(logits, output_word_offsets=True) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys()), 4) self.assertTrue("text" in outputs) self.assertTrue("word_offsets" in outputs) self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput)) self.assertListEqual( [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"]) 
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4]) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5]) @slow @require_torch @require_torchaudio def test_word_time_stamp_integration(self): import torch ds = load_dataset( "mozilla-foundation/common_voice_11_0", "en", split="train", streaming=True, trust_remote_code=True ) ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000)) ds_iter = iter(ds) sample = next(ds_iter) processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm") model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm") input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values with torch.no_grad(): logits = model(input_values).logits.cpu().numpy() output = processor.decode(logits[0], output_word_offsets=True) time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate word_time_stamps = [ { "start_time": d["start_offset"] * time_offset, "end_time": d["end_offset"] * time_offset, "word": d["word"], } for d in output["word_offsets"] ] EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL" EXPECTED_TEXT = "THE TRACK APPEARS ON THE COMPILATION ALBUM CRAFT FORKS" # output words self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT) self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text) # output times start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time")) end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time")) # fmt: off expected_start_tensor = torch.tensor([0.6800, 0.8800, 1.1800, 1.8600, 1.9600, 2.1000, 3.0000, 3.5600, 3.9800]) expected_end_tensor = torch.tensor([0.7800, 1.1000, 1.6600, 1.9200, 2.0400, 2.8000, 3.3000, 3.8800, 4.2800]) # fmt: on self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01)) self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
transformers/tests/models/wav2vec2_with_lm/test_processor_wav2vec2_with_lm.py/0
{ "file_path": "transformers/tests/models/wav2vec2_with_lm/test_processor_wav2vec2_with_lm.py", "repo_id": "transformers", "token_count": 9340 }
425
# coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "facebook/xglm-564M" tokenizer_class = XGLMTokenizer rust_tokenizer_class = XGLMTokenizerFast test_rust_tokenizer = True test_sentencepiece = True def setUp(self): super().setUp() # We have a SentencePiece fixture for testing tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True) tokenizer.save_pretrained(self.tmpdirname) def test_convert_token_and_id(self): """Test ``_convert_token_to_id`` and ``_convert_id_to_token``.""" token = "<pad>" token_id = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<s>") self.assertEqual(vocab_keys[1], "<pad>") self.assertEqual(len(vocab_keys), 1_008) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 1_008) def test_full_tokenizer(self): tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["โ–This", "โ–is", "โ–a", "โ–t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsรฉ.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "รฉ", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) @cached_property def big_tokenizer(self): return XGLMTokenizer.from_pretrained("facebook/xglm-564M") def test_picklable_without_disk(self): with 
tempfile.NamedTemporaryFile() as f: shutil.copyfile(SAMPLE_VOCAB, f.name) tokenizer = XGLMTokenizer(f.name, keep_accents=True) pickled_tokenizer = pickle.dumps(tokenizer) pickle.loads(pickled_tokenizer) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: self.skipTest(reason="test_rust_tokenizer is set to False") tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "I was born in 92000, and this is falsรฉ." tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) @slow def test_tokenization_base_easy_symbols(self): symbols = "Hello World!" original_tokenizer_encodings = [2, 31227, 4447, 35] self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols)) @slow def test_tokenization_base_hard_symbols(self): symbols = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will' " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth" ) original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735] # fmt: skip self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols)) @slow def test_tokenizer_integration(self): # fmt: off expected_encoding = { 'input_ids': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="facebook/xglm-564M", padding=False, )
transformers/tests/models/xglm/test_tokenization_xglm.py/0
{ "file_path": "transformers/tests/models/xglm/test_tokenization_xglm.py", "repo_id": "transformers", "token_count": 4238 }
426
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, Text2TextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class Text2TextGenerationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def get_test_pipeline(self, model, tokenizer, processor, torch_dtype="float32"): generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer, torch_dtype=torch_dtype) return generator, ["Something to write", "Something else"] def run_pipeline_test(self, generator, _): outputs = generator("Something there") self.assertEqual(outputs, [{"generated_text": ANY(str)}]) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["generated_text"].startswith("Something there")) outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], ], ) outputs = generator( ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True ) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], ], ) with self.assertRaises(ValueError): generator(4) @require_torch def test_small_model_pt(self): generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt") # do_sample=False necessary for reproducibility outputs = generator("Something there", do_sample=False) self.assertEqual(outputs, [{"generated_text": ""}]) num_return_sequences = 3 outputs = generator( "Something there", num_return_sequences=num_return_sequences, num_beams=num_return_sequences, ) target_outputs = [ {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"}, {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"}, {"generated_text": ""}, ] self.assertEqual(outputs, target_outputs) outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True) self.assertEqual( outputs, [ {"generated_token_ids": ANY(torch.Tensor)}, {"generated_token_ids": ANY(torch.Tensor)}, ], ) generator.tokenizer.pad_token_id = generator.model.config.eos_token_id generator.tokenizer.pad_token = "<pad>" outputs = generator( ["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True, ) self.assertEqual( outputs, [ [ {"generated_token_ids": ANY(torch.Tensor)}, {"generated_token_ids": ANY(torch.Tensor)}, 
], [ {"generated_token_ids": ANY(torch.Tensor)}, {"generated_token_ids": ANY(torch.Tensor)}, ], ], ) @require_tf def test_small_model_tf(self): generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf") # do_sample=False necessary for reproducibility outputs = generator("Something there", do_sample=False) self.assertEqual(outputs, [{"generated_text": ""}])
transformers/tests/pipelines/test_pipelines_text2text_generation.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_text2text_generation.py", "repo_id": "transformers", "token_count": 2131 }
427
# Testing mixed int8 quantization

![HFxbitsandbytes.png](https://cdn-uploads.huggingface.co/production/uploads/1660567705337-62441d1d9fdefb55a0b7d12c.png)

The following is the recipe for effectively debugging the `bitsandbytes` integration in Hugging Face `transformers`.

## Library requirements

+ `transformers>=4.22.0`
+ `accelerate>=0.12.0`
+ `bitsandbytes>=0.31.5`

## Hardware requirements

The following instructions were tested with 2 NVIDIA Tesla T4 GPUs. To run `bitsandbytes` successfully you need a GPU with 8-bit tensor core support. Turing, Ampere, or newer architectures (e.g. T4, RTX 20xx, RTX 30xx, A40-A100, A6000) should be supported.

## Virtual envs

```bash
conda create --name int8-testing python==3.8
pip install bitsandbytes>=0.31.5
pip install accelerate>=0.12.0
pip install transformers>=4.23.0
```

If `transformers>=4.23.0` is not released yet, install it from source instead:

```bash
pip install git+https://github.com/huggingface/transformers.git
```

## Troubleshooting

A list of common errors:

### Torch does not correctly do the operations on GPU

First check that:

```py
import torch

vec = torch.randn(1, 2, 3).to(0)
```

works without any error. If not, install torch using `conda`:

```bash
conda create --name int8-testing python==3.8
conda install pytorch torchvision torchaudio cudatoolkit=11.6 -c pytorch -c conda-forge
pip install bitsandbytes>=0.31.5
pip install accelerate>=0.12.0
pip install transformers>=4.23.0
```

For the latest pytorch instructions please see [this](https://pytorch.org/get-started/locally/); the snippet above should then work.

### `bitsandbytes operations are not supported under CPU!`

This happens when some Linear weights are placed on the CPU when using `accelerate`. Check `model.hf_device_map` carefully and make sure that no `Linear` module is assigned to CPU. It is fine to have the last module (usually the lm_head) on CPU.

### `To use the type as a Parameter, please correct the detach() semantics defined by __torch_dispatch__() implementation.`

Use the latest version of `accelerate`, e.g. `pip install -U accelerate`, and the problem should be solved.

### `Parameter has no attribute .CB`

Same solution as above.

### `RuntimeError: CUDA error: an illegal memory access was encountered ... consider passing CUDA_LAUNCH_BLOCKING=1`

Run your script prepended with `CUDA_LAUNCH_BLOCKING=1` and you should observe an error as described in the next section.

### `CUDA illegal memory error: an illegal memory access at line...`

Check the CUDA versions with:

```bash
nvcc --version
```

and confirm it is the same version as the one detected by `bitsandbytes`. If not, run:

```bash
ls -l $CONDA_PREFIX/lib/libcudart.so
```

or

```bash
ls -l $LD_LIBRARY_PATH
```

and check whether `libcudart.so` has a correct symlink. Sometimes `nvcc` detects the correct CUDA version but `bitsandbytes` doesn't; you have to make sure that the symlink for `libcudart.so` points to the correct CUDA runtime file.

Here is an example of a badly configured CUDA installation. `nvcc --version` gives:

![Screenshot 2022-08-15 at 15.12.23.png](https://cdn-uploads.huggingface.co/production/uploads/1660569220888-62441d1d9fdefb55a0b7d12c.png)

which means that the detected CUDA version is 11.3, but `bitsandbytes` outputs:

![image.png](https://cdn-uploads.huggingface.co/production/uploads/1660569284243-62441d1d9fdefb55a0b7d12c.png)

First check:

```bash
echo $LD_LIBRARY_PATH
```
If it contains multiple paths separated by `:`, check the symlink on each of them:

```bash
ls -l $path/libcudart.so
```

for every path `$path` in the list. If it contains a single path, simply run:

```bash
ls -l $LD_LIBRARY_PATH/libcudart.so
```

and you will see something like:

![Screenshot 2022-08-15 at 15.12.33.png](https://cdn-uploads.huggingface.co/production/uploads/1660569176504-62441d1d9fdefb55a0b7d12c.png)

If you see that the file is linked to the wrong CUDA version (here 10.2), find the correct location for `libcudart.so` (for example with `find / -name libcudart.so`) and point the `LD_LIBRARY_PATH` environment variable to the directory containing the correct `libcudart.so` file.
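As an additional cross-check (a minimal sketch, not part of the original recipe; it assumes the dynamic linker can resolve a plain `libcudart.so`, which may require a dev symlink), you can ask both PyTorch and the resolved CUDA runtime which version they see and compare them against `nvcc --version`:

```py
import ctypes

import torch

# Version PyTorch was built against (e.g. "11.6") and whether a GPU is visible.
print("torch built with CUDA:", torch.version.cuda)
print("CUDA available:", torch.cuda.is_available())

# Ask the libcudart.so that the dynamic linker resolves (i.e. the one found via
# LD_LIBRARY_PATH) for its version. cudaRuntimeGetVersion encodes 11.6 as 11060.
libcudart = ctypes.CDLL("libcudart.so")
runtime_version = ctypes.c_int()
libcudart.cudaRuntimeGetVersion(ctypes.byref(runtime_version))
major, minor = runtime_version.value // 1000, (runtime_version.value % 1000) // 10
print(f"libcudart resolved at runtime: {major}.{minor}")
```

If the two versions disagree, `bitsandbytes` is most likely picking up the wrong `libcudart.so`, and fixing `LD_LIBRARY_PATH` as described above should resolve it.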
transformers/tests/quantization/bnb/README.md/0
{ "file_path": "transformers/tests/quantization/bnb/README.md", "repo_id": "transformers", "token_count": 1405 }
428
# coding=utf-8 # Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest from transformers import AutoModelForCausalLM, AutoTokenizer, TorchAoConfig from transformers.testing_utils import ( require_torch_gpu, require_torch_multi_gpu, require_torchao, torch_device, ) from transformers.utils import is_torch_available, is_torchao_available if is_torch_available(): import torch if is_torchao_available(): from torchao.dtypes import AffineQuantizedTensor from torchao.dtypes.affine_quantized_tensor import TensorCoreTiledLayoutType def check_torchao_quantized(test_module, qlayer, batch_size=1, context_size=1024): weight = qlayer.weight test_module.assertTrue(isinstance(weight, AffineQuantizedTensor)) test_module.assertEqual(weight.quant_min, 0) test_module.assertEqual(weight.quant_max, 15) test_module.assertTrue(isinstance(weight.layout_type, TensorCoreTiledLayoutType)) def check_forward(test_module, model, batch_size=1, context_size=1024): # Test forward pass with torch.no_grad(): out = model(torch.zeros([batch_size, context_size], device=model.device, dtype=torch.int32)).logits test_module.assertEqual(out.shape[0], batch_size) test_module.assertEqual(out.shape[1], context_size) @require_torch_gpu @require_torchao class TorchAoConfigTest(unittest.TestCase): def test_to_dict(self): """ Makes sure the config format is properly set """ quantization_config = TorchAoConfig("int4_weight_only") torchao_orig_config = quantization_config.to_dict() for key in torchao_orig_config: self.assertEqual(getattr(quantization_config, key), torchao_orig_config[key]) def test_post_init_check(self): """ Test kwargs validations in TorchAoConfig """ _ = TorchAoConfig("int4_weight_only") with self.assertRaisesRegex(ValueError, "is not supported yet"): _ = TorchAoConfig("fp6") with self.assertRaisesRegex(ValueError, "Unexpected keyword arg"): _ = TorchAoConfig("int4_weight_only", group_size1=32) @require_torch_gpu @require_torchao class TorchAoTest(unittest.TestCase): input_text = "What are we having for dinner?" max_new_tokens = 10 EXPECTED_OUTPUT = "What are we having for dinner?\n- 1. 
What is the temperature outside" model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" def tearDown(self): gc.collect() torch.cuda.empty_cache() gc.collect() def test_int4wo_quant(self): """ Simple LLM model testing int4 weight only quantization """ quant_config = TorchAoConfig("int4_weight_only", group_size=32) # Note: we quantize the bfloat16 model on the fly to int4 quantized_model = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.bfloat16, device_map=torch_device, quantization_config=quant_config, ) tokenizer = AutoTokenizer.from_pretrained(self.model_name) check_torchao_quantized(self, quantized_model.model.layers[0].self_attn.v_proj) input_ids = tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_int4wo_quant_bfloat16_conversion(self): """ Testing the dtype of model will be modified to be bfloat16 for int4 weight only quantization """ quant_config = TorchAoConfig("int4_weight_only", group_size=32) # Note: we quantize the bfloat16 model on the fly to int4 quantized_model = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=None, device_map=torch_device, quantization_config=quant_config, ) tokenizer = AutoTokenizer.from_pretrained(self.model_name) check_torchao_quantized(self, quantized_model.model.layers[0].self_attn.v_proj) input_ids = tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @require_torch_multi_gpu def test_int4wo_quant_multi_gpu(self): """ Simple test that checks if the quantized model int4 wieght only is working properly with multiple GPUs set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 GPUS """ quant_config = TorchAoConfig("int4_weight_only", group_size=32) quantized_model = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.bfloat16, device_map="auto", quantization_config=quant_config, ) tokenizer = AutoTokenizer.from_pretrained(self.model_name) self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1}) input_ids = tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_int4wo_offload(self): """ Simple test that checks if the quantized model int4 wieght only is working properly with cpu/disk offload """ device_map_offload = { "model.embed_tokens": 0, "model.layers.0": 0, "model.layers.1": 0, "model.layers.2": 0, "model.layers.3": 0, "model.layers.4": 0, "model.layers.5": 0, "model.layers.6": 0, "model.layers.7": 0, "model.layers.8": 0, "model.layers.9": 0, "model.layers.10": 0, "model.layers.11": 0, "model.layers.12": 0, "model.layers.13": 0, "model.layers.14": 0, "model.layers.15": 0, "model.layers.16": 0, "model.layers.17": 0, "model.layers.18": 0, "model.layers.19": "cpu", "model.layers.20": "cpu", "model.layers.21": "disk", "model.norm": 0, "model.rotary_emb": 0, "lm_head": 0, } quant_config = TorchAoConfig("int4_weight_only", group_size=32) quantized_model = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.bfloat16, device_map=device_map_offload, quantization_config=quant_config, ) tokenizer 
= AutoTokenizer.from_pretrained(self.model_name) input_ids = tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens) EXPECTED_OUTPUT = "What are we having for dinner?\n- 2. What is the temperature outside" self.assertEqual(tokenizer.decode(output[0], skip_special_tokens=True), EXPECTED_OUTPUT) if __name__ == "__main__": unittest.main()
transformers/tests/quantization/torchao_integration/test_torchao.py/0
{ "file_path": "transformers/tests/quantization/torchao_integration/test_torchao.py", "repo_id": "transformers", "token_count": 3433 }
429
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True, reason="Skipping test because should only be run when releasing minor transformers version", ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "FacebookAI/roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "FacebookAI/roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class MultiNodeTest(unittest.TestCase): def setUp(self): if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, ) assert hasattr(self, "env") def create_estimator(self, instance_count): # configuration for running training on smdistributed Model Parallel mpi_options = { "enabled": True, "processes_per_host": 8, } smp_options = { "enabled": True, "parameters": { "microbatches": 4, "placement_strategy": "spread", "pipeline": "interleaved", "optimize": "speed", "partitions": 4, "ddp": True, }, } distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options} name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer" # creates estimator return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={ **self.env.hyperparameters, "model_name_or_path": self.model_name_or_path, "max_steps": 500, }, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", ) def save_results_as_csv(self, job_name): TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv") # @parameterized.expand([(2,), (4,),]) @parameterized.expand([(1,)]) def test_scripz(self, instance_count): # create estimator estimator = self.create_estimator(instance_count) # run training estimator.fit() # result dataframe result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping train_runtime = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump tests result into json file to share in PR with open(f"{estimator.latest_training_job.name}.json", "w") as outfile: json.dump({"train_time": train_runtime, 
"eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
transformers/tests/sagemaker/test_multi_node_model_parallel.py/0
{ "file_path": "transformers/tests/sagemaker/test_multi_node_model_parallel.py", "repo_id": "transformers", "token_count": 2103 }
430
# coding=utf-8 # Copyright 2018 HuggingFace Inc.. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ isort:skip_file """ import os import pickle import tempfile import unittest from typing import Callable, Optional import numpy as np from transformers import ( BatchEncoding, BertTokenizer, BertTokenizerFast, PreTrainedTokenizer, PreTrainedTokenizerFast, TensorType, TokenSpan, is_tokenizers_available, ) from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer from transformers.testing_utils import CaptureStderr, require_flax, require_tf, require_tokenizers, require_torch, slow if is_tokenizers_available(): from tokenizers import Tokenizer from tokenizers.models import WordPiece class TokenizerUtilsTest(unittest.TestCase): def check_tokenizer_from_pretrained(self, tokenizer_class): s3_models = list(tokenizer_class.max_model_input_sizes.keys()) for model_name in s3_models[:1]: tokenizer = tokenizer_class.from_pretrained(model_name) self.assertIsNotNone(tokenizer) self.assertIsInstance(tokenizer, tokenizer_class) self.assertIsInstance(tokenizer, PreTrainedTokenizer) for special_tok in tokenizer.all_special_tokens: self.assertIsInstance(special_tok, str) special_tok_id = tokenizer.convert_tokens_to_ids(special_tok) self.assertIsInstance(special_tok_id, int) def assert_dump_and_restore(self, be_original: BatchEncoding, equal_op: Optional[Callable] = None): batch_encoding_str = pickle.dumps(be_original) self.assertIsNotNone(batch_encoding_str) be_restored = pickle.loads(batch_encoding_str) # Ensure is_fast is correctly restored self.assertEqual(be_restored.is_fast, be_original.is_fast) # Ensure encodings are potentially correctly restored if be_original.is_fast: self.assertIsNotNone(be_restored.encodings) else: self.assertIsNone(be_restored.encodings) # Ensure the keys are the same for original_v, restored_v in zip(be_original.values(), be_restored.values()): if equal_op: self.assertTrue(equal_op(restored_v, original_v)) else: self.assertEqual(restored_v, original_v) @slow def test_pretrained_tokenizers(self): self.check_tokenizer_from_pretrained(GPT2Tokenizer) def test_tensor_type_from_str(self): self.assertEqual(TensorType("tf"), TensorType.TENSORFLOW) self.assertEqual(TensorType("pt"), TensorType.PYTORCH) self.assertEqual(TensorType("np"), TensorType.NUMPY) @require_tokenizers def test_batch_encoding_pickle(self): import numpy as np tokenizer_p = BertTokenizer.from_pretrained("google-bert/bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased") # Python no tensor with self.subTest("BatchEncoding (Python, return_tensors=None)"): self.assert_dump_and_restore(tokenizer_p("Small example to encode")) with self.subTest("BatchEncoding (Python, return_tensors=NUMPY)"): self.assert_dump_and_restore( tokenizer_p("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal ) with self.subTest("BatchEncoding (Rust, return_tensors=None)"): self.assert_dump_and_restore(tokenizer_r("Small example to encode")) with self.subTest("BatchEncoding (Rust, 
return_tensors=NUMPY)"): self.assert_dump_and_restore( tokenizer_r("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal ) @require_tf @require_tokenizers def test_batch_encoding_pickle_tf(self): import tensorflow as tf def tf_array_equals(t1, t2): return tf.reduce_all(tf.equal(t1, t2)) tokenizer_p = BertTokenizer.from_pretrained("google-bert/bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased") with self.subTest("BatchEncoding (Python, return_tensors=TENSORFLOW)"): self.assert_dump_and_restore( tokenizer_p("Small example to encode", return_tensors=TensorType.TENSORFLOW), tf_array_equals ) with self.subTest("BatchEncoding (Rust, return_tensors=TENSORFLOW)"): self.assert_dump_and_restore( tokenizer_r("Small example to encode", return_tensors=TensorType.TENSORFLOW), tf_array_equals ) @require_torch @require_tokenizers def test_batch_encoding_pickle_pt(self): import torch tokenizer_p = BertTokenizer.from_pretrained("google-bert/bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased") with self.subTest("BatchEncoding (Python, return_tensors=PYTORCH)"): self.assert_dump_and_restore( tokenizer_p("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal ) with self.subTest("BatchEncoding (Rust, return_tensors=PYTORCH)"): self.assert_dump_and_restore( tokenizer_r("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal ) @require_tokenizers def test_batch_encoding_is_fast(self): tokenizer_p = BertTokenizer.from_pretrained("google-bert/bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased") with self.subTest("Python Tokenizer"): self.assertFalse(tokenizer_p("Small example to_encode").is_fast) with self.subTest("Rust Tokenizer"): self.assertTrue(tokenizer_r("Small example to_encode").is_fast) @require_tokenizers def test_batch_encoding_word_to_tokens(self): tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased") encoded = tokenizer_r(["Test", "\xad", "test"], is_split_into_words=True) self.assertEqual(encoded.word_to_tokens(0), TokenSpan(start=1, end=2)) self.assertEqual(encoded.word_to_tokens(1), None) self.assertEqual(encoded.word_to_tokens(2), TokenSpan(start=2, end=3)) def test_batch_encoding_with_labels(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="np") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="np") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="np", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) @require_torch def test_batch_encoding_with_labels_pt(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="pt") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="pt") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = 
BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="pt", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) @require_tf def test_batch_encoding_with_labels_tf(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="tf") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="tf") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="tf", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) @require_flax def test_batch_encoding_with_labels_jax(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="jax") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="jax") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="jax", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) def test_padding_accepts_tensors(self): features = [{"input_ids": np.array([0, 1, 2])}, {"input_ids": np.array([0, 1, 2, 3])}] tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased") batch = tokenizer.pad(features, padding=True) self.assertTrue(isinstance(batch["input_ids"], np.ndarray)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) batch = tokenizer.pad(features, padding=True, return_tensors="np") self.assertTrue(isinstance(batch["input_ids"], np.ndarray)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) @require_torch def test_padding_accepts_tensors_pt(self): import torch features = [{"input_ids": torch.tensor([0, 1, 2])}, {"input_ids": torch.tensor([0, 1, 2, 3])}] tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased") batch = tokenizer.pad(features, padding=True) self.assertTrue(isinstance(batch["input_ids"], torch.Tensor)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) batch = tokenizer.pad(features, padding=True, return_tensors="pt") self.assertTrue(isinstance(batch["input_ids"], torch.Tensor)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) @require_tf def test_padding_accepts_tensors_tf(self): import tensorflow as tf features = [{"input_ids": tf.constant([0, 1, 2])}, {"input_ids": tf.constant([0, 1, 2, 3])}] tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased") batch = tokenizer.pad(features, padding=True) self.assertTrue(isinstance(batch["input_ids"], tf.Tensor)) self.assertEqual(batch["input_ids"].numpy().tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) batch = tokenizer.pad(features, padding=True, 
return_tensors="tf") self.assertTrue(isinstance(batch["input_ids"], tf.Tensor)) self.assertEqual(batch["input_ids"].numpy().tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) @require_tokenizers def test_instantiation_from_tokenizers(self): bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]")) PreTrainedTokenizerFast(tokenizer_object=bert_tokenizer) @require_tokenizers def test_instantiation_from_tokenizers_json_file(self): bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]")) with tempfile.TemporaryDirectory() as tmpdirname: bert_tokenizer.save(os.path.join(tmpdirname, "tokenizer.json")) PreTrainedTokenizerFast(tokenizer_file=os.path.join(tmpdirname, "tokenizer.json")) def test_len_tokenizer(self): for tokenizer_class in [BertTokenizer, BertTokenizerFast]: with self.subTest(f"{tokenizer_class}"): tokenizer = tokenizer_class.from_pretrained("bert-base-uncased") added_tokens_size = len(tokenizer.added_tokens_decoder) self.assertEqual(len(tokenizer), tokenizer.vocab_size) tokenizer.add_tokens(["<test_token>"]) self.assertEqual(len(tokenizer), tokenizer.vocab_size + 1) self.assertEqual(len(tokenizer.added_tokens_decoder), added_tokens_size + 1) self.assertEqual(len(tokenizer.added_tokens_encoder), added_tokens_size + 1)
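# --- Editor's note: hedged usage sketch, not part of the original test file above. ---
# The tests above pin down two behaviours worth seeing in isolation: a
# ``BatchEncoding`` survives a pickle round-trip (keeping its fast-tokenizer
# encodings), and ``tokenizer.pad`` accepts features that are already arrays.
# Assumes the ``google-bert/bert-base-cased`` checkpoint is reachable.
import pickle

import numpy as np

from transformers import BertTokenizerFast

tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased")

# Pickle round-trip: ``is_fast`` and the tensorised values are preserved.
encoding = tokenizer("Small example to encode", return_tensors="np")
restored = pickle.loads(pickle.dumps(encoding))
assert restored.is_fast
assert np.array_equal(restored["input_ids"], encoding["input_ids"])

# ``pad`` on pre-tokenised numpy features pads to the longest entry in the batch.
features = [{"input_ids": np.array([0, 1, 2])}, {"input_ids": np.array([0, 1, 2, 3])}]
padded = tokenizer.pad(features, padding=True, return_tensors="np")
assert padded["input_ids"].shape == (2, 4)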
transformers/tests/tokenization/test_tokenization_utils.py/0
{ "file_path": "transformers/tests/tokenization/test_tokenization_utils.py", "repo_id": "transformers", "token_count": 5982 }
431
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from typing import List, Optional, Tuple, Union from transformers.utils import DocstringParsingException, TypeHintParsingException, get_json_schema class JsonSchemaGeneratorTest(unittest.TestCase): def test_simple_function(self): def fn(x: int): """ Test function Args: x: The input """ return x schema = get_json_schema(fn) expected_schema = { "name": "fn", "description": "Test function", "parameters": { "type": "object", "properties": {"x": {"type": "integer", "description": "The input"}}, "required": ["x"], }, } self.assertEqual(schema["function"], expected_schema) def test_no_arguments(self): def fn(): """ Test function """ return True schema = get_json_schema(fn) expected_schema = { "name": "fn", "description": "Test function", "parameters": {"type": "object", "properties": {}}, } self.assertEqual(schema["function"], expected_schema) def test_union(self): def fn(x: Union[int, float]): """ Test function Args: x: The input """ return x schema = get_json_schema(fn) expected_schema = { "name": "fn", "description": "Test function", "parameters": { "type": "object", "properties": {"x": {"type": ["integer", "number"], "description": "The input"}}, "required": ["x"], }, } self.assertEqual(schema["function"], expected_schema) def test_optional(self): def fn(x: Optional[int]): """ Test function Args: x: The input """ return x schema = get_json_schema(fn) expected_schema = { "name": "fn", "description": "Test function", "parameters": { "type": "object", "properties": {"x": {"type": "integer", "description": "The input", "nullable": True}}, "required": ["x"], }, } self.assertEqual(schema["function"], expected_schema) def test_default_arg(self): def fn(x: int = 42): """ Test function Args: x: The input """ return x schema = get_json_schema(fn) expected_schema = { "name": "fn", "description": "Test function", "parameters": {"type": "object", "properties": {"x": {"type": "integer", "description": "The input"}}}, } self.assertEqual(schema["function"], expected_schema) def test_nested_list(self): def fn(x: List[List[Union[str, int]]]): """ Test function Args: x: The input """ return x schema = get_json_schema(fn) expected_schema = { "name": "fn", "description": "Test function", "parameters": { "type": "object", "properties": { "x": { "type": "array", "items": {"type": "array", "items": {"type": ["integer", "string"]}}, "description": "The input", } }, "required": ["x"], }, } self.assertEqual(schema["function"], expected_schema) def test_multiple_arguments(self): def fn(x: int, y: str): """ Test function Args: x: The input y: Also the input """ return x schema = get_json_schema(fn) expected_schema = { "name": "fn", "description": "Test function", "parameters": { "type": "object", "properties": { "x": {"type": "integer", "description": "The input"}, "y": {"type": "string", "description": "Also the input"}, }, "required": ["x", "y"], }, } self.assertEqual(schema["function"], expected_schema) def 
test_multiple_complex_arguments(self): def fn(x: List[Union[int, float]], y: Optional[Union[int, str]] = None): """ Test function Args: x: The input y: Also the input """ return x schema = get_json_schema(fn) expected_schema = { "name": "fn", "description": "Test function", "parameters": { "type": "object", "properties": { "x": {"type": "array", "items": {"type": ["integer", "number"]}, "description": "The input"}, "y": { "type": ["integer", "string"], "nullable": True, "description": "Also the input", }, }, "required": ["x"], }, } self.assertEqual(schema["function"], expected_schema) def test_missing_docstring(self): def fn(x: int): return x with self.assertRaises(DocstringParsingException): get_json_schema(fn) def test_missing_param_docstring(self): def fn(x: int): """ Test function """ return x with self.assertRaises(DocstringParsingException): get_json_schema(fn) def test_missing_type_hint(self): def fn(x): """ Test function Args: x: The input """ return x with self.assertRaises(TypeHintParsingException): get_json_schema(fn) def test_return_value(self): def fn(x: int) -> int: """ Test function Args: x: The input """ return x schema = get_json_schema(fn) expected_schema = { "name": "fn", "description": "Test function", "parameters": { "type": "object", "properties": {"x": {"type": "integer", "description": "The input"}}, "required": ["x"], }, "return": {"type": "integer"}, } self.assertEqual(schema["function"], expected_schema) def test_return_value_docstring(self): def fn(x: int) -> int: """ Test function Args: x: The input Returns: The output """ return x schema = get_json_schema(fn) expected_schema = { "name": "fn", "description": "Test function", "parameters": { "type": "object", "properties": {"x": {"type": "integer", "description": "The input"}}, "required": ["x"], }, "return": {"type": "integer", "description": "The output"}, } self.assertEqual(schema["function"], expected_schema) def test_tuple(self): def fn(x: Tuple[int, str]): """ Test function Args: x: The input Returns: The output """ return x schema = get_json_schema(fn) expected_schema = { "name": "fn", "description": "Test function", "parameters": { "type": "object", "properties": { "x": { "type": "array", "prefixItems": [{"type": "integer"}, {"type": "string"}], "description": "The input", } }, "required": ["x"], }, } self.assertEqual(schema["function"], expected_schema) def test_single_element_tuple_fails(self): def fn(x: Tuple[int]): """ Test function Args: x: The input Returns: The output """ return x # Single-element tuples should just be the type itself, or List[type] for variable-length inputs with self.assertRaises(TypeHintParsingException): get_json_schema(fn) def test_ellipsis_type_fails(self): def fn(x: Tuple[int, ...]): """ Test function Args: x: The input Returns: The output """ return x # Variable length inputs should be specified with List[type], not Tuple[type, ...] 
with self.assertRaises(TypeHintParsingException): get_json_schema(fn) def test_enum_extraction(self): def fn(temperature_format: str): """ Test function Args: temperature_format: The temperature format to use (Choices: ["celsius", "fahrenheit"]) Returns: The temperature """ return -40.0 # Let's see if that gets correctly parsed as an enum schema = get_json_schema(fn) expected_schema = { "name": "fn", "description": "Test function", "parameters": { "type": "object", "properties": { "temperature_format": { "type": "string", "enum": ["celsius", "fahrenheit"], "description": "The temperature format to use", } }, "required": ["temperature_format"], }, } self.assertEqual(schema["function"], expected_schema) def test_multiline_docstring_with_types(self): def fn(x: int, y: int): """ Test function Args: x: The first input y: The second input. This is a longer description that spans multiple lines with indentation and stuff. Returns: God knows what """ pass schema = get_json_schema(fn) expected_schema = { "name": "fn", "description": "Test function", "parameters": { "type": "object", "properties": { "x": {"type": "integer", "description": "The first input"}, "y": { "type": "integer", "description": "The second input. This is a longer description that spans multiple lines with indentation and stuff.", }, }, "required": ["x", "y"], }, } self.assertEqual(schema["function"], expected_schema) def test_everything_all_at_once(self): def fn( x: str, y: Optional[List[Union[str, int]]], z: Tuple[Union[str, int], str] = (42, "hello") ) -> Tuple[int, str]: """ Test function with multiple args, and docstring args that we have to strip out. Args: x: The first input. It's got a big multiline description and also contains (choices: ["a", "b", "c"]) y: The second input. It's a big list with a single-line description. z: The third input. It's some kind of tuple with a default arg. Returns: The output. The return description is also a big multiline description that spans multiple lines. """ pass schema = get_json_schema(fn) expected_schema = { "name": "fn", "description": "Test function with multiple args, and docstring args that we have to strip out.", "parameters": { "type": "object", "properties": { "x": { "type": "string", "enum": ["a", "b", "c"], "description": "The first input. It's got a big multiline description and also contains", }, "y": { "type": "array", "items": {"type": ["integer", "string"]}, "nullable": True, "description": "The second input. It's a big list with a single-line description.", }, "z": { "type": "array", "prefixItems": [{"type": ["integer", "string"]}, {"type": "string"}], "description": "The third input. It's some kind of tuple with a default arg.", }, }, "required": ["x", "y"], }, "return": { "type": "array", "prefixItems": [{"type": "integer"}, {"type": "string"}], "description": "The output. The return description is also a big multiline\n description that spans multiple lines.", }, } self.assertEqual(schema["function"], expected_schema)
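# --- Editor's note: hedged usage sketch, separate from the test file above. ---
# The pattern exercised by these tests, in one place: ``get_json_schema`` turns a
# type-hinted function with a Google-style docstring into a JSON-schema tool
# description. The function below is purely illustrative.
from transformers.utils import get_json_schema


def get_current_temperature(location: str, unit: str) -> float:
    """
    Get the current temperature at a location.

    Args:
        location: The city to query
        unit: The unit to report in (Choices: ["celsius", "fahrenheit"])

    Returns:
        The current temperature
    """
    return 22.0


schema = get_json_schema(get_current_temperature)
# Per the enum-extraction test above, the "unit" property is expected to come out as
# {"type": "string", "enum": ["celsius", "fahrenheit"], "description": "The unit to report in"}.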
transformers/tests/utils/test_chat_template_utils.py/0
{ "file_path": "transformers/tests/utils/test_chat_template_utils.py", "repo_id": "transformers", "token_count": 7722 }
432
# coding=utf-8 # Copyright 2020 The Hugging Face Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import io import unittest from dataclasses import dataclass from typing import Optional from transformers import AlbertForMaskedLM from transformers.testing_utils import require_torch from transformers.utils import ModelOutput, is_torch_available if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_2 @dataclass class ModelOutputTest(ModelOutput): a: float b: Optional[float] = None c: Optional[float] = None class ModelOutputTester(unittest.TestCase): def test_get_attributes(self): x = ModelOutputTest(a=30) self.assertEqual(x.a, 30) self.assertIsNone(x.b) self.assertIsNone(x.c) with self.assertRaises(AttributeError): _ = x.d def test_index_with_ints_and_slices(self): x = ModelOutputTest(a=30, b=10) self.assertEqual(x[0], 30) self.assertEqual(x[1], 10) self.assertEqual(x[:2], (30, 10)) self.assertEqual(x[:], (30, 10)) x = ModelOutputTest(a=30, c=10) self.assertEqual(x[0], 30) self.assertEqual(x[1], 10) self.assertEqual(x[:2], (30, 10)) self.assertEqual(x[:], (30, 10)) def test_index_with_strings(self): x = ModelOutputTest(a=30, b=10) self.assertEqual(x["a"], 30) self.assertEqual(x["b"], 10) with self.assertRaises(KeyError): _ = x["c"] x = ModelOutputTest(a=30, c=10) self.assertEqual(x["a"], 30) self.assertEqual(x["c"], 10) with self.assertRaises(KeyError): _ = x["b"] def test_dict_like_properties(self): x = ModelOutputTest(a=30) self.assertEqual(list(x.keys()), ["a"]) self.assertEqual(list(x.values()), [30]) self.assertEqual(list(x.items()), [("a", 30)]) self.assertEqual(list(x), ["a"]) x = ModelOutputTest(a=30, b=10) self.assertEqual(list(x.keys()), ["a", "b"]) self.assertEqual(list(x.values()), [30, 10]) self.assertEqual(list(x.items()), [("a", 30), ("b", 10)]) self.assertEqual(list(x), ["a", "b"]) x = ModelOutputTest(a=30, c=10) self.assertEqual(list(x.keys()), ["a", "c"]) self.assertEqual(list(x.values()), [30, 10]) self.assertEqual(list(x.items()), [("a", 30), ("c", 10)]) self.assertEqual(list(x), ["a", "c"]) with self.assertRaises(Exception): x = x.update({"d": 20}) with self.assertRaises(Exception): del x["a"] with self.assertRaises(Exception): _ = x.pop("a") with self.assertRaises(Exception): _ = x.setdefault("d", 32) def test_set_attributes(self): x = ModelOutputTest(a=30) x.a = 10 self.assertEqual(x.a, 10) self.assertEqual(x["a"], 10) def test_set_keys(self): x = ModelOutputTest(a=30) x["a"] = 10 self.assertEqual(x.a, 10) self.assertEqual(x["a"], 10) def test_instantiate_from_dict(self): x = ModelOutputTest({"a": 30, "b": 10}) self.assertEqual(list(x.keys()), ["a", "b"]) self.assertEqual(x.a, 30) self.assertEqual(x.b, 10) def test_instantiate_from_iterator(self): x = ModelOutputTest([("a", 30), ("b", 10)]) self.assertEqual(list(x.keys()), ["a", "b"]) self.assertEqual(x.a, 30) self.assertEqual(x.b, 10) with self.assertRaises(ValueError): _ = ModelOutputTest([("a", 30), (10, 10)]) x = ModelOutputTest(a=(30, 30)) 
self.assertEqual(list(x.keys()), ["a"]) self.assertEqual(x.a, (30, 30)) @require_torch def test_torch_pytree(self): # ensure torch.utils._pytree treats ModelOutput subclasses as nodes (and not leaves) # this is important for DistributedDataParallel gradient synchronization with static_graph=True import torch.utils._pytree as pytree x = ModelOutput({"a": 1.0, "c": 2.0}) self.assertFalse(pytree._is_leaf(x)) x = ModelOutputTest(a=1.0, c=2.0) self.assertFalse(pytree._is_leaf(x)) expected_flat_outs = [1.0, 2.0] expected_tree_spec = pytree.TreeSpec(ModelOutputTest, ["a", "c"], [pytree.LeafSpec(), pytree.LeafSpec()]) actual_flat_outs, actual_tree_spec = pytree.tree_flatten(x) self.assertEqual(expected_flat_outs, actual_flat_outs) self.assertEqual(expected_tree_spec, actual_tree_spec) unflattened_x = pytree.tree_unflatten(actual_flat_outs, actual_tree_spec) self.assertEqual(x, unflattened_x) if is_torch_greater_or_equal_than_2_2: self.assertEqual( pytree.treespec_dumps(actual_tree_spec), '[1, {"type": "tests.utils.test_model_output.ModelOutputTest", "context": "[\\"a\\", \\"c\\"]", "children_spec": [{"type": null, "context": null, "children_spec": []}, {"type": null, "context": null, "children_spec": []}]}]', ) # TODO: @ydshieh @unittest.skip(reason="CPU OOM") @require_torch def test_export_serialization(self): if not is_torch_greater_or_equal_than_2_2: self.skipTest(reason="Export serialization requires torch >= 2.2.0") model_cls = AlbertForMaskedLM model_config = model_cls.config_class() model = model_cls(model_config) input_dict = {"input_ids": torch.randint(0, 30000, (1, 512), dtype=torch.int64, requires_grad=False)} ep = torch.export.export(model, (), input_dict) buffer = io.BytesIO() torch.export.save(ep, buffer) buffer.seek(0) loaded_ep = torch.export.load(buffer) input_dict = {"input_ids": torch.randint(0, 30000, (1, 512), dtype=torch.int64, requires_grad=False)} assert torch.allclose(model(**input_dict).logits, loaded_ep(**input_dict).logits) class ModelOutputTestNoDataclass(ModelOutput): """Invalid test subclass of ModelOutput where @dataclass decorator is not used""" a: float b: Optional[float] = None c: Optional[float] = None class ModelOutputSubclassTester(unittest.TestCase): def test_direct_model_output(self): # Check that direct usage of ModelOutput instantiates without errors ModelOutput({"a": 1.1}) def test_subclass_no_dataclass(self): # Check that a subclass of ModelOutput without @dataclass is invalid # A valid subclass is inherently tested other unit tests above. with self.assertRaises(TypeError): ModelOutputTestNoDataclass(a=1.1, b=2.2, c=3.3)
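# --- Editor's note: hedged usage sketch, separate from the test file above. ---
# What the tests above establish, as a single example: a ``ModelOutput`` subclass
# must carry the ``@dataclass`` decorator, and then behaves both like a tuple
# (indexing skips fields left at ``None``) and like an ordered, read-only mapping.
from dataclasses import dataclass
from typing import Optional

from transformers.utils import ModelOutput


@dataclass
class ToyOutput(ModelOutput):
    loss: Optional[float] = None
    logits: Optional[float] = None


out = ToyOutput(logits=3.14)            # ``loss`` stays None and is therefore hidden
assert out[0] == 3.14                   # tuple-style access over the populated fields
assert out["logits"] == 3.14            # dict-style access
assert list(out.keys()) == ["logits"]   # only non-None fields are exposed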
transformers/tests/utils/test_model_output.py/0
{ "file_path": "transformers/tests/utils/test_model_output.py", "repo_id": "transformers", "token_count": 3188 }
433
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script is responsible for cleaning the model section of the table of content by removing duplicates and sorting the entries in alphabetical order. Usage (from the root of the repo): Check that the table of content is properly sorted (used in `make quality`): ```bash python utils/check_doc_toc.py ``` Auto-sort the table of content if it is not properly sorted (used in `make style`): ```bash python utils/check_doc_toc.py --fix_and_overwrite ``` """ import argparse from collections import defaultdict from typing import List import yaml PATH_TO_TOC = "docs/source/en/_toctree.yml" def clean_model_doc_toc(model_doc: List[dict]) -> List[dict]: """ Cleans a section of the table of content of the model documentation (one specific modality) by removing duplicates and sorting models alphabetically. Args: model_doc (`List[dict]`): The list of dictionaries extracted from the `_toctree.yml` file for this specific modality. Returns: `List[dict]`: List of dictionaries like the input, but cleaned up and sorted. """ counts = defaultdict(int) for doc in model_doc: counts[doc["local"]] += 1 duplicates = [key for key, value in counts.items() if value > 1] new_doc = [] for duplicate_key in duplicates: titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key}) if len(titles) > 1: raise ValueError( f"{duplicate_key} is present several times in the documentation table of content at " "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the " "others." ) # Only add this once new_doc.append({"local": duplicate_key, "title": titles[0]}) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1]) # Sort return sorted(new_doc, key=lambda s: s["title"].lower()) def check_model_doc(overwrite: bool = False): """ Check that the content of the table of content in `_toctree.yml` is clean (no duplicates and sorted for the model API doc) and potentially auto-cleans it. Args: overwrite (`bool`, *optional*, defaults to `False`): Whether to just check if the TOC is clean or to auto-clean it (when `overwrite=True`). """ with open(PATH_TO_TOC, encoding="utf-8") as f: content = yaml.safe_load(f.read()) # Get to the API doc api_idx = 0 while content[api_idx]["title"] != "API": api_idx += 1 api_doc = content[api_idx]["sections"] # Then to the model doc model_idx = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 model_doc = api_doc[model_idx]["sections"] # Extract the modalities and clean them one by one. 
modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section] diff = False for idx, modality_doc in modalities_docs: old_modality_doc = modality_doc["sections"] new_modality_doc = clean_model_doc_toc(old_modality_doc) if old_modality_doc != new_modality_doc: diff = True if overwrite: model_doc[idx]["sections"] = new_modality_doc if diff: if overwrite: api_doc[model_idx]["sections"] = model_doc content[api_idx]["sections"] = api_doc with open(PATH_TO_TOC, "w", encoding="utf-8") as f: f.write(yaml.dump(content, allow_unicode=True)) else: raise ValueError( "The model doc part of the table of content is not properly sorted, run `make style` to fix this." ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") args = parser.parse_args() check_model_doc(args.fix_and_overwrite)
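# --- Editor's note: hedged illustration, not part of the utility above. ---
# What ``clean_model_doc_toc`` does to one modality section, on a toy input:
#
#   sample = [
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/albert", "title": "ALBERT"},
#       {"local": "model_doc/bert", "title": "BERT"},   # duplicate entry
#   ]
#
# ``clean_model_doc_toc(sample)`` is expected to collapse the duplicate and sort
# case-insensitively by title, returning:
#
#   [{"local": "model_doc/albert", "title": "ALBERT"},
#    {"local": "model_doc/bert", "title": "BERT"}]
#
# A duplicate ``local`` target with two *different* titles instead raises a ``ValueError``.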
transformers/utils/check_doc_toc.py/0
{ "file_path": "transformers/utils/check_doc_toc.py", "repo_id": "transformers", "token_count": 1732 }
434
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging logger = logging.get_logger(__name__) def extract_warnings_from_single_artifact(artifact_path, targets): """Extract warnings from a downloaded artifact (in .zip format)""" selected_warnings = set() buffer = [] def parse_line(fp): for line in fp: if isinstance(line, bytes): line = line.decode("UTF-8") if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(" "): # process a single warning and move it to `selected_warnings`. if len(buffer) > 0: warning = "\n".join(buffer) # Only keep the warnings specified in `targets` if any(f": {x}: " in warning for x in targets): selected_warnings.add(warning) buffer.clear() continue else: line = line.strip() buffer.append(line) if from_gh: for filename in os.listdir(artifact_path): file_path = os.path.join(artifact_path, filename) if not os.path.isdir(file_path): # read the file if filename != "warnings.txt": continue with open(file_path) as fp: parse_line(fp) else: try: with zipfile.ZipFile(artifact_path) as z: for filename in z.namelist(): if not os.path.isdir(filename): # read the file if filename != "warnings.txt": continue with z.open(filename) as fp: parse_line(fp) except Exception: logger.warning( f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." ) return selected_warnings def extract_warnings(artifact_dir, targets): """Extract warnings from all artifact files""" selected_warnings = set() paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(p, targets)) return selected_warnings if __name__ == "__main__": def list_str(values): return values.split(",") parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") parser.add_argument( "--output_dir", type=str, required=True, help="Where to store the downloaded artifacts and other result files.", ) parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.") # optional parameters parser.add_argument( "--targets", default="DeprecationWarning,UserWarning,FutureWarning", type=list_str, help="Comma-separated list of target warning(s) which we want to extract.", ) parser.add_argument( "--from_gh", action="store_true", help="If running from a GitHub action workflow and collecting warnings from its artifacts.", ) args = parser.parse_args() from_gh = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v4` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links artifacts = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("=" * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts selected_warnings = extract_warnings(args.output_dir, args.targets) selected_warnings = sorted(selected_warnings) with open(os.path.join(args.output_dir, "selected_warnings.json"), 
"w", encoding="UTF-8") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
transformers/utils/extract_warnings.py/0
{ "file_path": "transformers/utils/extract_warnings.py", "repo_id": "transformers", "token_count": 2110 }
435
# coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utility that prepares the repository for releases (or patches) by updating all versions in the relevant places. It also performs some post-release cleanup, by updating the links in the main README to respective model doc pages (from main to stable). To prepare for a release, use from the root of the repo on the release branch with: ```bash python release.py ``` or use `make pre-release`. To prepare for a patch release, use from the root of the repo on the release branch with: ```bash python release.py --patch ``` or use `make pre-patch`. To do the post-release cleanup, use from the root of the repo on the main branch with: ```bash python release.py --post_release ``` or use `make post-release`. """ import argparse import os import re import packaging.version # All paths are defined with the intent that this script should be run from the root of the repo. PATH_TO_EXAMPLES = "examples/" # This maps a type of file to the pattern to look for when searching where the version is defined, as well as the # template to follow when replacing it with the new version. REPLACE_PATTERNS = { "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'), } # This maps a type of file to its path in Transformers REPLACE_FILES = { "init": "src/transformers/__init__.py", "setup": "setup.py", } README_FILE = "README.md" def update_version_in_file(fname: str, version: str, file_type: str): """ Update the version of Transformers in one file. Args: fname (`str`): The path to the file where we want to update the version. version (`str`): The new version to set in the file. file_type (`str`): The type of the file (should be a key in `REPLACE_PATTERNS`). """ with open(fname, "r", encoding="utf-8", newline="\n") as f: code = f.read() re_pattern, replace = REPLACE_PATTERNS[file_type] replace = replace.replace("VERSION", version) code = re_pattern.sub(replace, code) with open(fname, "w", encoding="utf-8", newline="\n") as f: f.write(code) def update_version_in_examples(version: str): """ Update the version in all examples files. Args: version (`str`): The new version to set in the examples. """ for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("research_projects") if "legacy" in directories: directories.remove("legacy") for fname in fnames: if fname.endswith(".py"): update_version_in_file(os.path.join(folder, fname), version, file_type="examples") def global_version_update(version: str, patch: bool = False): """ Update the version in all needed files. Args: version (`str`): The new version to set everywhere. 
patch (`bool`, *optional*, defaults to `False`): Whether or not this is a patch release. """ for pattern, fname in REPLACE_FILES.items(): update_version_in_file(fname, version, pattern) if not patch: # We don't update the version in the examples for patch releases. update_version_in_examples(version) def get_version() -> packaging.version.Version: """ Reads the current version in the main __init__. """ with open(REPLACE_FILES["init"], "r") as f: code = f.read() default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0] return packaging.version.parse(default_version) def pre_release_work(patch: bool = False): """ Do all the necessary pre-release steps: - figure out the next minor release version and ask confirmation - update the version eveywhere - clean-up the model list in the main README Args: patch (`bool`, *optional*, defaults to `False`): Whether or not this is a patch release. """ # First let's get the default version: base version if we are in dev, bump minor otherwise. default_version = get_version() if patch and default_version.is_devrelease: raise ValueError("Can't create a patch version from the dev branch, checkout a released version!") if default_version.is_devrelease: default_version = default_version.base_version elif patch: default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}" else: default_version = f"{default_version.major}.{default_version.minor + 1}.0" # Now let's ask nicely if we have found the right version. version = input(f"Which version are you releasing? [{default_version}]") if len(version) == 0: version = default_version print(f"Updating version to {version}.") global_version_update(version, patch=patch) def post_release_work(): """ Do all the necesarry post-release steps: - figure out the next dev version and ask confirmation - update the version eveywhere - clean-up the model list in the main README """ # First let's get the current version current_version = get_version() dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0" current_version = current_version.base_version # Check with the user we got that right. version = input(f"Which version are we developing now? [{dev_version}]") if len(version) == 0: version = dev_version print(f"Updating version to {version}.") global_version_update(version) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.") parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.") args = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("Nothing to do after a patch :-)") else: post_release_work()
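# --- Editor's note: hedged sketch, not part of the release utility above. ---
# How the default version proposals are derived from the current ``__version__``
# (mirrors ``pre_release_work`` / ``post_release_work``):
import packaging.version

current = packaging.version.parse("4.41.0.dev0")                   # illustrative dev version
release_default = current.base_version                             # "4.41.0"  (pre-release)
next_dev_default = f"{current.major}.{current.minor + 1}.0.dev0"   # "4.42.0.dev0" (post-release)

released = packaging.version.parse("4.41.0")                       # a non-dev version on a release branch
patch_default = f"{released.major}.{released.minor}.{released.micro + 1}"  # "4.41.1" (patch)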
transformers/utils/release.py/0
{ "file_path": "transformers/utils/release.py", "repo_id": "transformers", "token_count": 2388 }
436
{ "opsets": { "1": [ "Abs", "Add", "AddV2", "ArgMax", "ArgMin", "AvgPool", "AvgPool3D", "BatchMatMul", "BatchMatMulV2", "BatchToSpaceND", "BiasAdd", "BiasAddV1", "Cast", "Ceil", "CheckNumerics", "ComplexAbs", "Concat", "ConcatV2", "Const", "ConstV2", "Conv1D", "Conv2D", "Conv2DBackpropInput", "Conv3D", "Conv3DBackpropInputV2", "DepthToSpace", "DepthwiseConv2d", "DepthwiseConv2dNative", "Div", "Dropout", "Elu", "Equal", "Erf", "Exp", "ExpandDims", "Flatten", "Floor", "Gather", "GatherNd", "GatherV2", "Greater", "Identity", "IdentityN", "If", "LRN", "LSTMBlockCell", "LeakyRelu", "Less", "Log", "LogSoftmax", "LogicalAnd", "LogicalNot", "LogicalOr", "LookupTableSizeV2", "MatMul", "Max", "MaxPool", "MaxPool3D", "MaxPoolV2", "Maximum", "Mean", "Min", "Minimum", "MirrorPad", "Mul", "Neg", "NoOp", "NotEqual", "OneHot", "Pack", "Pad", "PadV2", "Placeholder", "PlaceholderV2", "PlaceholderWithDefault", "Pow", "Prod", "RFFT", "RandomNormal", "RandomNormalLike", "RandomUniform", "RandomUniformLike", "RealDiv", "Reciprocal", "Relu", "Relu6", "Reshape", "Rsqrt", "Selu", "Shape", "Sigmoid", "Sign", "Size", "Slice", "Softmax", "Softplus", "Softsign", "SpaceToBatchND", "SpaceToDepth", "Split", "SplitV", "Sqrt", "Square", "SquaredDifference", "Squeeze", "StatelessIf", "StopGradient", "StridedSlice", "StringJoin", "Sub", "Sum", "Tanh", "Tile", "TopKV2", "Transpose", "TruncateDiv", "Unpack", "ZerosLike" ], "2": [], "3": [], "4": [], "5": [], "6": [ "AddN", "All", "Any", "FloorDiv", "FusedBatchNorm", "FusedBatchNormV2", "FusedBatchNormV3" ], "7": [ "Acos", "Asin", "Atan", "Cos", "Fill", "FloorMod", "GreaterEqual", "LessEqual", "Loop", "MatrixBandPart", "Multinomial", "Range", "ResizeBilinear", "ResizeNearestNeighbor", "Scan", "Select", "SelectV2", "Sin", "SoftmaxCrossEntropyWithLogits", "SparseSoftmaxCrossEntropyWithLogits", "StatelessWhile", "Tan", "TensorListFromTensor", "TensorListGetItem", "TensorListLength", "TensorListReserve", "TensorListResize", "TensorListSetItem", "TensorListStack", "While" ], "8": [ "BroadcastTo", "ClipByValue", "FIFOQueueV2", "HashTableV2", "IteratorGetNext", "IteratorV2", "LookupTableFindV2", "MaxPoolWithArgmax", "QueueDequeueManyV2", "QueueDequeueUpToV2", "QueueDequeueV2", "ReverseSequence" ], "9": [ "SegmentMax", "SegmentMean", "SegmentMin", "SegmentProd", "SegmentSum", "Sinh", "SparseSegmentMean", "SparseSegmentMeanWithNumSegments", "SparseSegmentSqrtN", "SparseSegmentSqrtNWithNumSegments", "SparseSegmentSum", "SparseSegmentSumWithNumSegments", "UnsortedSegmentMax", "UnsortedSegmentMin", "UnsortedSegmentProd", "UnsortedSegmentSum", "Where" ], "10": [ "CropAndResize", "CudnnRNN", "DynamicStitch", "FakeQuantWithMinMaxArgs", "IsFinite", "IsInf", "NonMaxSuppressionV2", "NonMaxSuppressionV3", "NonMaxSuppressionV4", "NonMaxSuppressionV5", "ParallelDynamicStitch", "ReverseV2", "Roll" ], "11": [ "Bincount", "Cumsum", "InvertPermutation", "LeftShift", "MatrixDeterminant", "MatrixDiagPart", "MatrixDiagPartV2", "MatrixDiagPartV3", "RaggedRange", "RightShift", "Round", "ScatterNd", "SparseFillEmptyRows", "SparseReshape", "SparseToDense", "TensorScatterUpdate", "Unique" ], "12": [ "Einsum", "MatrixDiag", "MatrixDiagV2", "MatrixDiagV3", "MatrixSetDiagV3", "SquaredDistance" ], "13": [] } }
transformers/utils/tf_ops/onnx.json/0
{ "file_path": "transformers/utils/tf_ops/onnx.json", "repo_id": "transformers", "token_count": 4081 }
437
from dataclasses import dataclass import tyro from huggingface_hub import HfApi @dataclass class Args: folder_path: str = "benchmark/trl" path_in_repo: str = "images/benchmark" repo_id: str = "trl-internal-testing/example-images" repo_type: str = "dataset" args = tyro.cli(Args) api = HfApi() api.upload_folder( folder_path=args.folder_path, path_in_repo=args.path_in_repo, repo_id=args.repo_id, repo_type=args.repo_type, )
trl/benchmark/upload_benchmark.py/0
{ "file_path": "trl/benchmark/upload_benchmark.py", "repo_id": "trl", "token_count": 200 }
438
# Quickstart ## How does it work? Fine-tuning a language model via PPO consists of roughly three steps: 1. **Rollout**: The language model generates a response or continuation based on a query which could be the start of a sentence. 2. **Evaluation**: The query and response are evaluated with a function, model, human feedback, or some combination of them. The important thing is that this process should yield a scalar value for each query/response pair. The optimization will aim at maximizing this value. 3. **Optimization**: This is the most complex part. In the optimisation step the query/response pairs are used to calculate the log-probabilities of the tokens in the sequences. This is done with the model that is trained and a reference model, which is usually the pre-trained model before fine-tuning. The KL-divergence between the two outputs is used as an additional reward signal to make sure the generated responses don't deviate too far from the reference language model. The active language model is then trained with PPO. The full process is illustrated in the following figure: <img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl_overview.png"/> ## Minimal example The following code illustrates the steps above. ```python # 0. imports import torch from transformers import GPT2Tokenizer from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer # 1. load a pretrained model model = AutoModelForCausalLMWithValueHead.from_pretrained("gpt2") ref_model = AutoModelForCausalLMWithValueHead.from_pretrained("gpt2") tokenizer = GPT2Tokenizer.from_pretrained("gpt2") tokenizer.pad_token = tokenizer.eos_token # 2. initialize trainer ppo_config = {"mini_batch_size": 1, "batch_size": 1} config = PPOConfig(**ppo_config) ppo_trainer = PPOTrainer(config, model, ref_model, tokenizer) # 3. encode a query query_txt = "This morning I went to the " query_tensor = tokenizer.encode(query_txt, return_tensors="pt").to(model.pretrained_model.device) # 4. generate model response generation_kwargs = { "min_length": -1, "top_k": 0.0, "top_p": 1.0, "do_sample": True, "pad_token_id": tokenizer.eos_token_id, "max_new_tokens": 20, } response_tensor = ppo_trainer.generate([item for item in query_tensor], return_prompt=False, **generation_kwargs) response_txt = tokenizer.decode(response_tensor[0]) # 5. define a reward for response # (this could be any reward such as human feedback or output from another model) reward = [torch.tensor(1.0, device=model.pretrained_model.device)] # 6. train model with ppo train_stats = ppo_trainer.step([query_tensor[0]], [response_tensor[0]], reward) ``` In general, you would run steps 3-6 in a for-loop and run it on many diverse queries. You can find more realistic examples in the examples section. ## How to use a trained model After training a `AutoModelForCausalLMWithValueHead`, you can directly use the model in `transformers`. ```python # .. Let's assume we have a trained model using `PPOTrainer` and `AutoModelForCausalLMWithValueHead` # push the model on the Hub model.push_to_hub("my-fine-tuned-model-ppo") # or save it locally model.save_pretrained("my-fine-tuned-model-ppo") # load the model from the Hub from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("my-fine-tuned-model-ppo") ``` You can also load your model with `AutoModelForCausalLMWithValueHead` if you want to use the value head, for example to continue training. 
```python from trl import AutoModelForCausalLMWithValueHead model = AutoModelForCausalLMWithValueHead.from_pretrained("my-fine-tuned-model-ppo") ```
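The reloaded value-head model can then be plugged straight back into a `PPOTrainer` to continue training. The snippet below is a minimal sketch of that, reusing the tokenizer and configuration style from the example above; the checkpoint name is the illustrative one used throughout this page, and passing `ref_model=None` relies on the trainer creating a reference copy for you.

```python
from trl import PPOConfig, PPOTrainer

# Continue PPO training from the reloaded value-head checkpoint (illustrative sketch).
config = PPOConfig(mini_batch_size=1, batch_size=1)
ppo_trainer = PPOTrainer(config, model, ref_model=None, tokenizer=tokenizer)
# ... then run the generate / reward / step loop exactly as in steps 3-6 above.
```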
trl/docs/source/quickstart.mdx/0
{ "file_path": "trl/docs/source/quickstart.mdx", "repo_id": "trl", "token_count": 1120 }
439
# This is an example configuration file of TRL CLI, you can use it for # SFT like that: `trl sft --config config.yaml --output_dir test-sft` # The YAML file supports environment variables by adding an `env` field # as below # env: # CUDA_VISIBLE_DEVICES: 0 model_name_or_path: trl-internal-testing/tiny-random-LlamaForCausalLM dataset_name: imdb dataset_text_field: text report_to: none learning_rate: 0.0001 lr_scheduler_type: cosine
trl/examples/cli_configs/example_config.yaml/0
{ "file_path": "trl/examples/cli_configs/example_config.yaml", "repo_id": "trl", "token_count": 169 }
440
# DPO pipeline for the creation of StackLlaMa 2: a Stack exchange llama-v2-7b model ## Prerequisites Install all the dependencies in the `requirements.txt`: ``` $ pip install -U -r requirements.txt ``` Since we will use `accelerate` for training, make sure to run: ``` $ accelerate config ``` ## Training There are two main steps to the DPO training process: 1. Supervised fine-tuning of the base llama-v2-7b model to create llama-v2-7b-se: ``` accelerate launch examples/research_projects/stack_llama_2/scripts/sft_llama2.py \ --output_dir="./sft" \ --max_steps=500 \ --logging_steps=10 \ --save_steps=10 \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=1 \ --gradient_accumulation_steps=2 \ --gradient_checkpointing=False \ --group_by_length=False \ --learning_rate=1e-4 \ --lr_scheduler_type="cosine" \ --warmup_steps=100 \ --weight_decay=0.05 \ --optim="paged_adamw_32bit" \ --bf16=True \ --remove_unused_columns=False \ --run_name="sft_llama2" \ --report_to="wandb" ``` 2. Run the DPO trainer using the model saved by the previous step: ``` accelerate launch examples/research_projects/stack_llama_2/scripts/dpo_llama2.py \ --model_name_or_path="sft/final_checkpoint" \ --output_dir="dpo" ``` ## Merging the adapters To merge the adapters into the base model we can use the `merge_peft_adapter.py` helper script that comes with TRL: ``` python examples/research_projects/stack_llama/scripts/merge_peft_adapter.py --base_model_name="meta-llama/Llama-2-7b-hf" --adapter_model_name="dpo/final_checkpoint/" --output_name="stack-llama-2" ``` which will also push the model to your Hugging Face Hub account. ## Running the model We can load the DPO-trained LoRA adapters that were saved by the DPO training step via: ```py import torch from peft import AutoPeftModelForCausalLM model = AutoPeftModelForCausalLM.from_pretrained( "dpo/final_checkpoint", low_cpu_mem_usage=True, torch_dtype=torch.float16, load_in_4bit=True, ) model.generate(...) ```
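For a quick smoke test of the merged weights, a generation sketch along these lines should work (the model id `stack-llama-2`, the `Question:`/`Answer:` prompt format, and the sampling settings are illustrative; point `model_id` at wherever `merge_peft_adapter.py` pushed or saved your model):

```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "stack-llama-2"  # or a local path / your-namespace/stack-llama-2
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")

prompt = "Question: How do I merge two dictionaries in Python?\n\nAnswer: "
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=128, do_sample=True, top_p=0.9)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```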
trl/examples/research_projects/stack_llama_2/scripts/README.md/0
{ "file_path": "trl/examples/research_projects/stack_llama_2/scripts/README.md", "repo_id": "trl", "token_count": 896 }
441
# flake8: noqa # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ # regular: python examples/scripts/dpo.py \ --dataset_name=trl-internal-testing/hh-rlhf-helpful-base-trl-style \ --model_name_or_path=gpt2 \ --per_device_train_batch_size 4 \ --learning_rate 1e-3 \ --gradient_accumulation_steps 1 \ --logging_steps 10 \ --eval_steps 500 \ --output_dir="dpo_anthropic_hh" \ --warmup_steps 150 \ --report_to wandb \ --bf16 \ --logging_first_step \ --no_remove_unused_columns # peft: python examples/scripts/dpo.py \ --dataset_name=trl-internal-testing/hh-rlhf-helpful-base-trl-style \ --model_name_or_path=gpt2 \ --per_device_train_batch_size 4 \ --learning_rate 1e-3 \ --gradient_accumulation_steps 1 \ --logging_steps 10 \ --eval_steps 500 \ --output_dir="dpo_anthropic_hh" \ --optim rmsprop \ --warmup_steps 150 \ --report_to wandb \ --bf16 \ --logging_first_step \ --no_remove_unused_columns \ --use_peft \ --lora_r=16 \ --lora_alpha=16 """ import logging import multiprocessing import os from contextlib import nullcontext from trl.commands.cli_utils import DPOScriptArguments, init_zero_verbose, TrlParser from trl.env_utils import strtobool TRL_USE_RICH = strtobool(os.getenv("TRL_USE_RICH", "0")) if TRL_USE_RICH: init_zero_verbose() FORMAT = "%(message)s" from rich.console import Console from rich.logging import RichHandler import torch from datasets import load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer from accelerate import PartialState from trl import ( DPOConfig, DPOTrainer, ModelConfig, RichProgressCallback, get_kbit_device_map, get_peft_config, get_quantization_config, ) if TRL_USE_RICH: logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()], level=logging.INFO) if __name__ == "__main__": parser = TrlParser((DPOScriptArguments, DPOConfig, ModelConfig)) args, training_args, model_config = parser.parse_args_and_config() # Force use our print callback if TRL_USE_RICH: training_args.disable_tqdm = True console = Console() ################ # Model & Tokenizer ################ torch_dtype = ( model_config.torch_dtype if model_config.torch_dtype in ["auto", None] else getattr(torch, model_config.torch_dtype) ) quantization_config = get_quantization_config(model_config) model_kwargs = dict( revision=model_config.model_revision, attn_implementation=model_config.attn_implementation, torch_dtype=torch_dtype, use_cache=False if training_args.gradient_checkpointing else True, device_map=get_kbit_device_map() if quantization_config is not None else None, quantization_config=quantization_config, ) model = AutoModelForCausalLM.from_pretrained( model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code, **model_kwargs ) peft_config = get_peft_config(model_config) if peft_config is None: ref_model = AutoModelForCausalLM.from_pretrained( model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code, **model_kwargs ) else: ref_model = None tokenizer = 
AutoTokenizer.from_pretrained( model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code ) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token if tokenizer.chat_template is None: tokenizer.chat_template = "{% for message in messages %}{{message['role'] + ': ' + message['content'] + '\n\n'}}{% endfor %}{{ eos_token }}" if args.ignore_bias_buffers: # torch distributed hack model._ddp_params_and_buffers_to_ignore = [ name for name, buffer in model.named_buffers() if buffer.dtype == torch.bool ] ################ # Optional rich context managers ############### init_context = nullcontext() if not TRL_USE_RICH else console.status("[bold green]Initializing the DPOTrainer...") save_context = ( nullcontext() if not TRL_USE_RICH else console.status(f"[bold green]Training completed! Saving the model to {training_args.output_dir}") ) ################ # Dataset ################ ds = load_dataset(args.dataset_name) if args.sanity_check: for key in ds: ds[key] = ds[key].select(range(50)) def process(row): row["prompt"] = tokenizer.apply_chat_template(row["chosen"][:-1], tokenize=False) row["chosen"] = tokenizer.apply_chat_template([row["chosen"][-1]], tokenize=False) row["rejected"] = tokenizer.apply_chat_template([row["rejected"][-1]], tokenize=False) return row # Compute that only on the main process for faster data processing. # see: https://github.com/huggingface/trl/pull/1255 with PartialState().local_main_process_first(): ds = ds.map(process, num_proc=training_args.dataset_num_proc) train_dataset = ds[args.dataset_train_split] eval_dataset = ds[args.dataset_test_split] ################ # Training ################ with init_context: trainer = DPOTrainer( model, ref_model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, peft_config=peft_config, callbacks=[RichProgressCallback] if TRL_USE_RICH else None, ) trainer.train() with save_context: trainer.save_model(training_args.output_dir)
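# --- Editor's note: hedged sketch, not part of the script above. ---
# Shape of the data this script expects and produces. The source dataset stores
# "chosen" and "rejected" as lists of {"role", "content"} chat messages; after the
# ``process`` map, each row handed to ``DPOTrainer`` carries three plain strings:
#
#   row["prompt"]   -> the conversation rendered up to (but excluding) the final turn
#                      of the "chosen" thread, via the tokenizer's chat template
#   row["chosen"]   -> the rendered preferred final assistant turn
#   row["rejected"] -> the rendered dispreferred final assistant turn
#
# Any preference dataset massaged into this prompt/chosen/rejected string format
# should be usable with the trainer configured above.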
trl/examples/scripts/dpo.py/0
{ "file_path": "trl/examples/scripts/dpo.py", "repo_id": "trl", "token_count": 2542 }
442
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import platform import subprocess def test(): command = """\ python examples/scripts/ppo/ppo.py \ --learning_rate 3e-6 \ --output_dir models/minimal/ppo \ --per_device_train_batch_size 4 \ --gradient_accumulation_steps 1 \ --total_episodes 10 \ --model_name_or_path EleutherAI/pythia-14m \ --non_eos_penalty \ --save_strategy no \ --stop_token eos \ """ if platform.system() == "Windows": # windows CI does not work with subprocesses for some reason # e.g., https://github.com/huggingface/trl/actions/runs/9600036224/job/26475286210?pr=1743 return subprocess.run( command, shell=True, check=True, ) def test_num_train_epochs(): command = """\ python examples/scripts/ppo/ppo.py \ --learning_rate 3e-6 \ --output_dir models/minimal/ppo \ --per_device_train_batch_size 4 \ --gradient_accumulation_steps 1 \ --num_train_epochs 0.003 \ --model_name_or_path EleutherAI/pythia-14m \ --non_eos_penalty \ --save_strategy no \ --stop_token eos \ """ if platform.system() == "Windows": # windows CI does not work with subprocesses for some reason # e.g., https://github.com/huggingface/trl/actions/runs/9600036224/job/26475286210?pr=1743 return subprocess.run( command, shell=True, check=True, )
trl/tests/test_ppov2_trainer.py/0
{ "file_path": "trl/tests/test_ppov2_trainer.py", "repo_id": "trl", "token_count": 760 }
443
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re import warnings from typing import Optional import torch from accelerate.utils import extract_model_from_parallel from transformers import StoppingCriteria, StoppingCriteriaList from ..import_utils import is_rich_available if is_rich_available(): from rich import print from rich.text import Text class StringStoppingCriteria(StoppingCriteria): """Custom `StoppingCriteria` which checks if all generations in the batch are completed.""" def __init__(self, stop_strings, tokenizer): self.stop_strings = stop_strings self.tokenizer = tokenizer self.first_call = True def __call__(self, input_ids, scores, **kwargs): """Returns true if all generated sequences contain any of the stop strings.""" if self.first_call: self.generated_tokens = [1 for _ in range(input_ids.shape[0])] self.start_length = input_ids.shape[-1] - 1 self.first_call = False decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :]) done = [] for i, decoded_generation in enumerate(decoded_generations): sequence_complete = any(stop_string in decoded_generation for stop_string in self.stop_strings) done.append(sequence_complete) if not sequence_complete: self.generated_tokens[i] += 1 if all(done): self.first_call = True return all(done) class TextHistory: """The TextHistory class keeps track of the history of an interaction between the language model and the environment.""" def __init__(self, text, tokens, system=True): """ Initialize TextHistory. args: text (`str`): The text of the first segment. tokens (`torch.LongTensor`): The tokens of the first segment. system (`bool`, *optional*): Whether the first segment is a system or user segment. """ self.system_spans = [] self.text_spans = [] self.token_spans = [] self.token_masks = torch.tensor([], dtype=torch.long).to(tokens.device) self.text = "" self.tokens = torch.tensor([], dtype=torch.long).to(tokens.device) self.completed = False self.truncated = False self.reward = 0.0 self.prompt_color = "black on grey85" self.system_color = "black on cyan3" self.model_color = "black on deep_sky_blue1" self.reward_color = "black on plum1" self.append_segment(text, tokens, system=system) def append_segment(self, text, tokens, system=True): """ Append a new segment to the history. args: text (`str`): The text of the new segment. tokens (`torch.LongTensor`): The tokens of the new segment. system (`bool`, *optional*): Whether the new segment is a system or user segment. 
""" if len(text) == 0 or len(tokens) == 0: raise ValueError("Can't append empty text or token list to history.") original_text_length = len(self.text) self.text += text self.text_spans.append((original_text_length, len(self.text))) self.system_spans.append(system) original_token_length = len(self.tokens) self.tokens = torch.cat((self.tokens, tokens)) if system: self.token_masks = torch.cat((self.token_masks, torch.zeros_like(tokens))) else: self.token_masks = torch.cat((self.token_masks, torch.ones_like(tokens))) self.token_spans.append((original_token_length, len(self.tokens))) def complete(self, truncated=False): """ Mark the history as completed. """ self.completed = True self.truncated = truncated @property def last_text_segment(self): """ Get the last text segment. """ start, end = self.text_spans[-1] return self.text[start:end] def split_query_response_tokens(self): """ Split the tokens into query and response tokens. """ split_index = self.token_spans[0][1] query = self.tokens[:split_index] response = self.tokens[split_index:] mask = self.token_masks[split_index:] return query, response, mask def show_text(self, show_legend=False): """ Print the text history. """ if not is_rich_available(): warnings.warn("install rich to display text") return text = Text(self.text) text.stylize(self.prompt_color, self.text_spans[0][0], self.text_spans[1][0]) for i, (start, end) in enumerate(self.text_spans[1:]): if self.system_spans[i + 1]: text.stylize(self.system_color, start, end) else: text.stylize(self.model_color, start, end) text.append(f"\n\nReward: {self.reward}", style=self.reward_color) print(text) if show_legend: self.show_colour_legend() def show_tokens(self, tokenizer, show_legend=False): """ Print the history tokens. """ if not is_rich_available(): warnings.warn("install rich to display tokens") return text = Text() prompt_end = self.token_spans[0][1] for i, (token, mask) in enumerate(zip(self.tokens, self.token_masks)): if i < prompt_end: text.append(tokenizer.convert_ids_to_tokens(token.item()), style=self.prompt_color) text.append(" ") elif mask == 0: text.append(tokenizer.convert_ids_to_tokens(token.item()), style=self.system_color) text.append(" ") else: text.append(tokenizer.convert_ids_to_tokens(token.item()), style=self.model_color) text.append(" ") text.append(f"\n\nReward: {self.reward}", style=self.reward_color) print(text) if show_legend: self.show_colour_legend() def show_colour_legend(self): """ Print the colour legend. """ if not is_rich_available(): warnings.warn("install rich to display colour legend") return text = Text("\n\n(Colour Legend: ") text.append("Prompt", style=self.prompt_color) text.append("|") text.append("System", style=self.system_color) text.append("|") text.append("Model", style=self.model_color) text.append("|") text.append("Reward", style=self.reward_color) text.append(")") print(text) class TextEnvironment: """ The TextEnvironment enables interaction of a LLM with an environment using tools. """ def __init__( self, model=None, tokenizer=None, tools=None, reward_fn=None, prompt=None, max_turns=4, max_tool_reponse=100, max_length=None, generation_kwargs=None, ): """ Initialize TextEnvironment. Args: model (`PreTrainedModelWrapper`): The model to use for generation. tokenizer (`transformers.PreTrainedTokenizer`): The tokenizer to use for generation. tools (list): A list of tools to use for interaction. reward_fn (function): A function that takes a string and returns a reward. prompt (str): The base prompt to use for generation. 
Is prepended to the tasks. max_turns (Optional[int]): The maximum number of turns to allow. max_tool_response (Optional[int]): The maximum number of characters to allow in a tool response. max_length (Optional[int]): The maximum number of tokens to allow in an episode. generation_kwargs (Optional[dict]): A dictionary of keyword arguments to pass to the model's generate method. """ self.model = model self.tokenizer = tokenizer self.prompt = prompt if isinstance(tools, dict): self.tools = tools else: self.tools = {tool.__class__.__name__: tool for tool in tools} self.reward_fn = reward_fn self.max_length = max_length self.request_token = "<request>" self.call_token = "<call>" self.response_token = "<response>" self.submit_token = "<submit>" self.max_turns = max_turns self.max_tool_response = max_tool_reponse if generation_kwargs is None: self.generation_kwargs = dict() else: self.generation_kwargs = generation_kwargs self.is_encoder_decoder = hasattr(self.model, "is_encoder_decoder") self.current_device = extract_model_from_parallel(self.model).pretrained_model.device def run(self, queries, **rewards_kwargs): """ Run the environment on a list of queries. Args: queries (list[str]): A list of queries to run the model in the environment on. """ turns = 0 queries = [self.prompt + task for task in queries] queries_tokens = [ self.tokenizer(query, return_tensors="pt").input_ids[0].to(self.model.pretrained_model.device) for query in queries ] histories = [TextHistory(q, qt, system=True) for q, qt in zip(queries, queries_tokens)] while any(not history.completed for history in histories) and turns < self.max_turns: histories = self.generate(histories) histories = self.tasks_end_check(histories) # TODO: make this parallel rather than for-loop for i in range(len(histories)): histories[i] = self.step(histories[i]) histories = self.tasks_end_check(histories, model_turn=False) turns += 1 self.compute_reward(histories, **rewards_kwargs) # convert a list of (q, r, m) tuples to lists of all qs, rs, and ms respectively queries, responses, masks = map(list, zip(*[history.split_query_response_tokens() for history in histories])) rewards = [history.reward for history in histories] return queries, responses, masks, rewards, histories def step(self, history): """ Step the environment forward one turn. Args: history (`TextHistory`): The history to step forward. """ truncated, ended = self.task_end_check(history) if ended: history.complete(truncated=truncated) if history.completed: return history tool, query = self.parse_tool_call(history.last_text_segment) if tool is None or query is None: response = f"Unknown tool call: {history.last_text_segment}" else: if tool not in self.tools: response = f"Unknown tool {tool}." try: response = self.tools[tool](query) except Exception as error: response = f"Tool error: {str(error)}" if len(response) > self.max_tool_response: response = response[: (self.max_tool_response - 3)] + "..." history.append_segment( response + self.response_token, self.tokenizer(response + self.response_token, return_tensors="pt") .input_ids[0] .to(self.model.pretrained_model.device), system=True, ) return history def parse_tool_call(self, text): """ Parse request string. 
Expected format: <request><tool_name>query<call> """ result = re.search(f"(?<={self.request_token}).*?(?={self.call_token})", text, re.DOTALL) # if we can't find a <request>/<call> span we return none if result is None: return None, None else: extracted_text = result.group() result = re.search(r"<(.*?)>", extracted_text) # if we can't find a tool name we return none if result is None: return None, None else: tool = result.group(1) # split off the tool name query = ">".join(extracted_text.split(">")[1:]) return tool, query def compute_reward(self, histories, **reward_kwargs): """ Compute the reward for a list of histories. """ rewards = self.reward_fn([history.last_text_segment for history in histories], **reward_kwargs) for history, reward in zip(histories, rewards): history.reward = reward return histories def generate(self, histories): """ Generate responses for a list of histories. """ active_histories = [i for i, history in enumerate(histories) if not history.completed] query_tensors = [histories[i].tokens for i in active_histories] response_tensors = self._generate_batched(query_tensors) response_texts = self.tokenizer.batch_decode(response_tensors) for i, response_text, response_tensor in zip(active_histories, response_texts, response_tensors): histories[i].append_segment(response_text, response_tensor, system=False) return histories def tasks_end_check(self, histories, model_turn=True): """ Check if the current generation sequences have finished. """ for history in histories: if not history.completed: truncated, ended = self.task_end_check(history, model_turn=model_turn) if ended: history.complete(truncated=truncated) return histories def task_end_check(self, history, model_turn=True): """ Check if the current generation sequence has finished. """ truncated = False ended = False if history.completed: return truncated, ended if self.max_length is not None and len(self.tokenizer(history.text).input_ids[0]) > self.max_length: truncated = True ended = True elif self.tokenizer.eos_token in history.text: ended = True elif model_turn and not ( (self.request_token in history.last_text_segment and self.call_token in history.last_text_segment) or self.submit_token in history.last_text_segment ): ended = True elif self.submit_token in history.last_text_segment: ended = True return truncated, ended def _generate_batched( self, query_tensors, batch_size: int = 16, pad_to_multiple_of: Optional[int] = None, ): """ Generate responses for a list of query tensors. args: query_tensors (list[torch.Tensor]): A list of query tensors to generate responses for. batch_size (int): The batch size to use for generation. pad_to_multiple_of (int): The padding length to use for generation. 
""" outputs = [] padding_side_default = self.tokenizer.padding_side if not self.is_encoder_decoder: self.tokenizer.padding_side = "left" # in case we have fewer examples than bs batch_size = min(len(query_tensors), batch_size) for i in range(0, len(query_tensors), batch_size): # prevent overflow if query tensors are not even multiple of bs end_index = min(len(query_tensors), i + batch_size) batch = query_tensors[i:end_index] batch_mask = [torch.ones_like(element) for element in batch] inputs = {"input_ids": batch, "attention_mask": batch_mask} padded_inputs = self.tokenizer.pad( inputs, padding=True, max_length=None, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ).to(self.current_device) stopping_criteria = StringStoppingCriteria([self.call_token, self.submit_token], self.tokenizer) self.generation_kwargs["stopping_criteria"] = StoppingCriteriaList([stopping_criteria]) generations = extract_model_from_parallel(self.model).generate(**padded_inputs, **self.generation_kwargs) for generation, mask, generated_tokens in zip( generations, padded_inputs["attention_mask"], stopping_criteria.generated_tokens ): if not self.is_encoder_decoder: output = generation[(1 - mask).sum() :] # remove padding else: output = generation if not self.is_encoder_decoder: output = output[(mask).sum() :] # remove prompt # remove chunk generated after stopping criteria in batch mode outputs.append(output[:generated_tokens]) self.tokenizer.padding_side = padding_side_default return outputs
trl/trl/environment/base_environment.py/0
{ "file_path": "trl/trl/environment/base_environment.py", "repo_id": "trl", "token_count": 7661 }
444
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Dict, Optional from transformers import TrainingArguments from ..import_utils import is_sklearn_available @dataclass class BCOConfig(TrainingArguments): r""" BCOConfig collects all training arguments related to the [`BCOTrainer`] class. Using [`HfArgumentParser`] we can turn this class into [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the command line. Parameters: max_length (`int`, *optional*, defaults to `None`): The maximum length of the sequences in the batch. This argument is required if you want to use the default data collator. max_prompt_length (`int`, *optional*, defaults to `None`): The maximum length of the prompt. This argument is required if you want to use the default data collator. max_completion_length (`int`, *optional*, defaults to `None`): The maximum length of the target. This argument is required if you want to use the default data collator and your model is an encoder-decoder. beta (`float`, defaults to 0.1): The beta factor in BCO loss. Higher beta means less divergence from the initial policy. label_pad_token_id (`int`, defaults to `-100`): The label pad token id. This argument is required if you want to use the default data collator. padding_value (`int`, defaults to `0`): The padding value if it is different to the tokenizer's pad_token_id. truncation_mode (`str`, defaults to `keep_end`): The truncation mode to use, either `keep_end` or `keep_start`. This argument is required if you want to use the default data collator. generate_during_eval (`bool`, defaults to `False`): Whether to sample and log generations during evaluation step. is_encoder_decoder (`Optional[bool]`, `optional`, defaults to `None`): If no model is provided, we need to know if the model_init returns an encoder-decoder. precompute_ref_log_probs (`bool`, defaults to `False`): Flag to precompute reference model log probabilities for training and evaluation datasets. This is useful if you want to train without the reference model and reduce the total GPU memory needed. model_init_kwargs: (`Optional[Dict]`, *optional*): Dict of Optional kwargs to pass when instantiating the model from a string. ref_model_init_kwargs: (`Optional[Dict]`, *optional*): Dict of Optional kwargs to pass when instantiating the ref model from a string. dataset_num_proc: (`Optional[int]`, *optional*, defaults to `None`): Number of processes to use for processing the datasets. prompt_sample_size: (`int`, defaults to 1024): Number of prompts that are fed to density ratio classifier. min_density_ratio: (`float`, defaults to 0.5): The minimum value of the density ratio. The estimated density ratio is clamped to this value. max_density_ratio: (`float`, defaults to 10.0): The maximum value of the density ratio. The estimated density ratio is clamped to this value. 
""" max_length: Optional[int] = None """The maximum length of the sequences in the batch. This argument is required if you want to use the default data collator.""" max_prompt_length: Optional[int] = None """The maximum length of the prompt. This argument is required if you want to use the default data collator.""" max_completion_length: Optional[int] = None """The maximum length of the target. This argument is required if you want to use the default data collator and your model is an encoder-decoder.""" beta: float = 0.1 """The beta factor in BCO loss. Higher beta means less divergence from the initial policy.""" label_pad_token_id: int = -100 padding_value: int = None truncation_mode: str = "keep_end" generate_during_eval: bool = False is_encoder_decoder: Optional[bool] = None precompute_ref_log_probs: bool = False model_init_kwargs: Optional[Dict] = None ref_model_init_kwargs: Optional[Dict] = None dataset_num_proc: Optional[int] = None # BCO config prompt_sample_size: int = 1024 min_density_ratio: float = 0.5 max_density_ratio: float = 10.0 def __post_init__(self): super().__post_init__() if not is_sklearn_available(): raise ImportError( "You need to install scikit-learn to use `BCOTrainer` " "You can install it with `pip install scikit-learn`." )
trl/trl/trainer/bco_config.py/0
{ "file_path": "trl/trl/trainer/bco_config.py", "repo_id": "trl", "token_count": 1784 }
445
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Dict, Optional from transformers import TrainingArguments @dataclass class ORPOConfig(TrainingArguments): r""" ORPOConfig collects all training arguments related to the [`ORPOTrainer`] class. Using [`HfArgumentParser`] we can turn this class into [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the command line. Parameters: max_length (`int`, defaults to `None`): The maximum length of the sequences in the batch. This argument is required if you want to use the default data collator. max_prompt_length (`int`, defaults to `None`): The maximum length of the prompt. This argument is required if you want to use the default data collator. max_completion_length (`int`, defaults to `None`): The maximum length of the completions. This argument is required if you want to use the default data collator and your model is an encoder-decoder. beta (`float`, defaults to 0.1): The beta factor in ORPO loss (lambda/alpha in paper/code) that is the weight of the relative loss ratio in the SFT loss. label_pad_token_id (`int`, defaults to `-100`): The label pad token id. This argument is required if you want to use the default data collator. padding_value (`int`, defaults to `None`): The padding value if it is different to the tokenizer's pad_token_id. truncation_mode (`str`, defaults to `keep_end`): The truncation mode to use, either `keep_end` or `keep_start`. This argument is required if you want to use the default data collator. generate_during_eval (`bool`, defaults to `False`): Whether to sample and log generations during evaluation step. is_encoder_decoder (`Optional[bool]`, `optional`, defaults to `None`): If no model is provided, we need to know if the model_init returns an encoder-decoder. disable_dropout (`bool`, defaults to `True`): Whether or not to disable dropouts in `model`. model_init_kwargs (`Optional[Dict]`, *optional*): Dict of Optional kwargs to pass when instantiating the model from a string dataset_num_proc (`Optional[int]`, *optional*): The number of workers to use to tokenize the data. Defaults to None. """ max_length: Optional[int] = None max_prompt_length: Optional[int] = None max_completion_length: Optional[int] = None beta: float = 0.1 disable_dropout: bool = True label_pad_token_id: int = -100 padding_value: int = None truncation_mode: str = "keep_end" generate_during_eval: bool = False is_encoder_decoder: Optional[bool] = None model_init_kwargs: Optional[Dict] = None dataset_num_proc: Optional[int] = None
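Since the docstring above mentions exposing these fields through [`HfArgumentParser`], a rough sketch of that pattern is shown below; the script name and flag values are illustrative only.

```python
# Sketch: turning ORPOConfig into command-line arguments with HfArgumentParser.
from transformers import HfArgumentParser
from trl import ORPOConfig

parser = HfArgumentParser(ORPOConfig)
# e.g. `python train_orpo.py --output_dir ./orpo-model --beta 0.1 --max_length 1024`
(training_args,) = parser.parse_args_into_dataclasses()
print(training_args.beta, training_args.max_prompt_length)
```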
trl/trl/trainer/orpo_config.py/0
{ "file_path": "trl/trl/trainer/orpo_config.py", "repo_id": "trl", "token_count": 1172 }
446
// File only needed for VSCode users to have proper Docker based interpreters { "name": "accelerate_dev_environment", "build": { // ACTION NEEDED: comment/uncomment the relevant line depending on whether you are in a CPU/GPU environment "dockerfile": "../docker/accelerate-cpu/Dockerfile" // "dockerfile": "../docker/accelerate-gpu/Dockerfile" }, "runArgs": [ // ACTION NEEDED: uncomment the next line if your local machine has GPUs available // "--gpus", "all", // Enable the docker container to access system resources "--ipc", "host" ], "remoteEnv": { "PYTHONPATH": "${containerEnv:PATH}:${containerWorkspaceFolder}" }, "customizations": { "vscode": { "extensions": [ // Ensure we have IntelliSense in VSCode when running inside container "ms-python.python" ] } }, "workspaceFolder": "/workspaces/accelerate", // Need git for VSCode to color code modifications. Only runs when building environment. "onCreateCommand": "apt-get update && apt-get install -y git && pip install -e '.[dev]'" }
accelerate/.devcontainer/devcontainer.json/0
{ "file_path": "accelerate/.devcontainer/devcontainer.json", "repo_id": "accelerate", "token_count": 459 }
0
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
accelerate/LICENSE/0
{ "file_path": "accelerate/LICENSE", "repo_id": "accelerate", "token_count": 3168 }
1
# Builds GPU docker image of PyTorch specifically # Uses multi-staged approach to reduce size # Stage 1 # Use base conda image to reduce time FROM continuumio/miniconda3:latest AS compile-image # Specify py version # Note: DeepSpeed beyond v0.12.6 requires py 3.10 ENV PYTHON_VERSION=3.10 # Install apt libs RUN apt-get update && \ apt-get install -y curl git wget && \ apt-get clean && \ rm -rf /var/lib/apt/lists* # Create our conda env RUN conda create --name accelerate python=${PYTHON_VERSION} ipython jupyter pip # We don't install pytorch here yet since CUDA isn't available # instead we use the direct torch wheel ENV PATH /opt/conda/envs/accelerate/bin:$PATH # Activate our bash shell RUN chsh -s /bin/bash SHELL ["/bin/bash", "-c"] # Activate the conda env, install mpi4py, and install torch + accelerate RUN source activate accelerate && conda install -c conda-forge mpi4py RUN source activate accelerate && \ python3 -m pip install --no-cache-dir \ git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers,deepspeed] \ --extra-index-url https://download.pytorch.org/whl/cu117 RUN python3 -m pip install --no-cache-dir bitsandbytes # Stage 2 FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu20.04 AS build-image COPY --from=compile-image /opt/conda /opt/conda ENV PATH /opt/conda/bin:$PATH # Install apt libs RUN apt-get update && \ apt-get install -y curl git wget && \ apt-get clean && \ rm -rf /var/lib/apt/lists* RUN echo "source activate accelerate" >> ~/.profile # Activate the virtualenv CMD ["/bin/bash"]
accelerate/docker/accelerate-gpu-deepspeed/Dockerfile/0
{ "file_path": "accelerate/docker/accelerate-gpu-deepspeed/Dockerfile", "repo_id": "accelerate", "token_count": 560 }
2
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Gradient Synchronization PyTorch's distributed module operates by communicating back and forth between all of the GPUs in your system. This communication takes time, and ensuring all processes know each other's states happens at particular trigger points when using DDP (`DistributedDataParallel`). These trigger points are added to the PyTorch model, specifically its `forward()` and `backward()` methods. This happens when the model is wrapped with `DistributedDataParallel`: ```python import torch.nn as nn from torch.nn.parallel import DistributedDataParallel model = nn.Linear(10, 10) ddp_model = DistributedDataParallel(model) ``` In 🤗 Accelerate this conversion happens automatically when calling [`~Accelerator.prepare`] and passing in your model. ```diff + from accelerate import Accelerator + accelerator = Accelerator() import torch.nn as nn - from torch.nn.parallel import DistributedDataParallel model = nn.Linear(10,10) + model = accelerator.prepare(model) ``` ## The slowdown in gradient accumulation You now understand that PyTorch adds hooks to the `forward` and `backward` methods of your PyTorch model when training in a distributed setup. But how does this risk slowing down your code? In DDP (distributed data parallel), processes are expected to perform certain operations in a specific order at specific points, and these operations must also occur at roughly the same time before moving on. The most direct example is when you update model parameters through `optimizer.step()`. Without gradient accumulation, all instances of the model need to have their gradients computed, collated, and updated before moving on to the next batch of data. When performing gradient accumulation, you accumulate `n` loss gradients and skip `optimizer.step()` until `n` batches have been reached. Since all training processes only need to synchronize by the time `optimizer.step()` is called, synchronizing gradients on every batch without any modification to your training step introduces needless inter-process communication that can cause a significant slowdown. How can you avoid this overhead? ## Solving the slowdown problem Since you are skipping model parameter updates when training on these batches, their gradients do not need to be synchronized until the point where `optimizer.step()` is actually called. PyTorch cannot automagically tell when you need to do this, but it does provide a tool to help through the [`no_sync`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel.no_sync) context manager that is added to your model after converting it to DDP. Under this context manager, PyTorch will skip synchronizing the gradients when `.backward()` is called, and the first call to `.backward()` outside this context manager will trigger the synchronization.
See an example below: ```python ddp_model, dataloader, optimizer = accelerator.prepare(model, dataloader, optimizer) for index, batch in enumerate(dataloader): inputs, targets = batch # Trigger gradient synchronization on the last batch if index != (len(dataloader) - 1): with ddp_model.no_sync(): # Gradients only accumulate outputs = ddp_model(inputs) loss = loss_func(outputs, targets) accelerator.backward(loss) else: # Gradients finally sync outputs = ddp_model(inputs) loss = loss_func(outputs, targets) accelerator.backward(loss) optimizer.step() ``` In 🤗 Accelerate, to make this an API that can be called no matter the training device (though it may not do anything if you are not in a distributed system!), `ddp_model.no_sync` gets replaced with [`~Accelerator.no_sync`] and operates the same way: ```diff ddp_model, dataloader, optimizer = accelerator.prepare(model, dataloader, optimizer) for index, batch in enumerate(dataloader): inputs, targets = batch # Trigger gradient synchronization on the last batch if index != (len(dataloader)-1): - with ddp_model.no_sync(): + with accelerator.no_sync(model): # Gradients only accumulate outputs = ddp_model(inputs) loss = loss_func(outputs, targets) accelerator.backward(loss) else: # Gradients finally sync outputs = ddp_model(inputs) loss = loss_func(outputs, targets) accelerator.backward(loss) optimizer.step() optimizer.zero_grad() ``` As you may expect, the [`~Accelerator.accumulate`] function wraps around this conditional check by keeping track of the current batch number, leaving you with the final gradient accumulation API: ```python ddp_model, dataloader, optimizer = accelerator.prepare(model, dataloader, optimizer) for batch in dataloader: with accelerator.accumulate(model): optimizer.zero_grad() inputs, targets = batch outputs = model(inputs) loss = loss_func(outputs, targets) accelerator.backward(loss) optimizer.step() optimizer.zero_grad() ``` As a result, you should use either `accelerator.accumulate` or `accelerator.no_sync` when it comes to API choice. ## Just how much of a slowdown is there, and easy mistakes you can make To set up a realistic example, consider the following setup: * Two single-GPU T4 nodes and one node with two GPUs * Each GPU is a T4, and all of them are hosted on GCP * The script used is a modification of the [NLP Example](https://github.com/muellerzr/timing_experiments/blob/main/baseline.py) script * Batch size per GPU is 16, and gradients are accumulated every 4 steps All scripts are available in [this repository](https://github.com/muellerzr/timing_experiments). If you are not careful about gradient synchronization and GPU communication, a *large* amount of time can be wasted on these GPUs communicating with each other at unnecessary times. By how much?
Reference: - Baseline: does not use any of the synchronization practices discussed here - `no_sync` improperly: `no_sync` only around the `backward` call, not the `forward` - `no_sync`: using the `no_sync` pattern properly - `accumulate`: using [`~Accelerator.accumulate`] properly Below are the average seconds per batch iterating over 29 batches of data for each setup on both a single node and on the dual-node setup: | | Baseline | `no_sync` improperly | `no_sync` | `accumulate`| | :---------: | :-------: | :------------------: | :-------: | :---------: | | Multi-Node | 2±0.01s | 2.13±0.08s | **0.91±0.11s** | **0.91±0.11s** | | Single Node | 0.50±0.01s | 0.50±0.01s | **0.41±0.015s** | **0.41±0.015s** | As you can see, if you are not careful about how you set up your gradient synchronization, you can get more than a 2x slowdown during training! If you are worried about making sure everything is done properly, we highly recommend utilizing the [`~Accelerator.accumulate`] function and passing in `gradient_accumulation_steps` or `gradient_accumulation_plugin` to the [`Accelerator`] object so Accelerate can handle this for you. ### `no_sync` requires additional GPU memory when using FSDP Be aware that not syncing gradients can have adverse effects while performing FSDP training. As warned in `torch`'s documentation, the [`no_sync` context manager for FSDP](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.FullyShardedDataParallel.no_sync) will require additional memory. Therefore, in memory-intensive situations while using FSDP, we recommend setting `sync_each_batch` to `True` in the [`~utils.GradientAccumulationPlugin`] to disable `no_sync`. See the example below where we fine-tune Mixtral (47B parameters) on 8 A100-80GB GPUs. We see that even for a modest `gradient_accumulation_steps=2` we quickly go out-of-memory (OOM) if `no_sync` is enabled. Again, this is due to the additional memory overhead of FSDP's `no_sync`. However, if `no_sync` is disabled via `sync_each_batch=True`, then the memory consumption for `gradient_accumulation_steps=16` reverts to that of `gradient_accumulation_steps=1`. | Model | `no_sync` (accum=1) | `no_sync` (accum=2) | `no_sync` disabled (accum=16) | :-------------: | :-----------------: | :-----------------: | :-----------------: mixtral 8x7B | 69G | OOM | 69G > [!WARNING] > Disabling `no_sync` means there _will be a slowdown_ due to the extra data syncs, as explained in the earlier sections of this guide.
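To make the `sync_each_batch` recommendation concrete, a minimal sketch of wiring it up is shown below. It uses a toy model and dataloader purely to illustrate the API surface; in a real FSDP run you would prepare your sharded model and real data instead.

```python
import torch
from accelerate import Accelerator
from accelerate.utils import GradientAccumulationPlugin

# Toy objects so the snippet is self-contained; replace with your real training setup.
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataset = torch.utils.data.TensorDataset(torch.randn(64, 4), torch.randn(64, 1))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=4)
loss_func = torch.nn.MSELoss()

# `sync_each_batch=True` disables `no_sync`, trading some speed for lower FSDP memory use.
plugin = GradientAccumulationPlugin(num_steps=16, sync_each_batch=True)
accelerator = Accelerator(gradient_accumulation_plugin=plugin)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    with accelerator.accumulate(model):
        loss = loss_func(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
```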
accelerate/docs/source/concept_guides/gradient_synchronization.md/0
{ "file_path": "accelerate/docs/source/concept_guides/gradient_synchronization.md", "repo_id": "accelerate", "token_count": 2842 }
3
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Kwargs Handlers The following objects can be passed to the main [`Accelerator`] to customize how some PyTorch objects related to distributed training or mixed precision are created. ## AutocastKwargs [[autodoc]] AutocastKwargs ## DistributedDataParallelKwargs [[autodoc]] DistributedDataParallelKwargs ## FP8RecipeKwargs [[autodoc]] utils.FP8RecipeKwargs ## ProfileKwargs [[autodoc]] utils.ProfileKwargs ## GradScalerKwargs [[autodoc]] GradScalerKwargs ## InitProcessGroupKwargs [[autodoc]] InitProcessGroupKwargs ## KwargsHandler [[autodoc]] utils.KwargsHandler
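For reference, these handlers are passed to the [`Accelerator`] through its `kwargs_handlers` argument. A short sketch (the specific values are illustrative):

```python
from datetime import timedelta

from accelerate import Accelerator, DistributedDataParallelKwargs, InitProcessGroupKwargs

# Customize how DDP wraps the model and how the process group is initialized.
ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
init_kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=1800))

accelerator = Accelerator(kwargs_handlers=[ddp_kwargs, init_kwargs])
```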
accelerate/docs/source/package_reference/kwargs.md/0
{ "file_path": "accelerate/docs/source/package_reference/kwargs.md", "repo_id": "accelerate", "token_count": 385 }
4
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Performing gradient accumulation with ๐Ÿค— Accelerate Gradient accumulation is a technique where you can train on bigger batch sizes than your machine would normally be able to fit into memory. This is done by accumulating gradients over several batches, and only stepping the optimizer after a certain number of batches have been performed. While technically standard gradient accumulation code would work fine in a distributed setup, it is not the most efficient method for doing so and you may experience considerable slowdowns! In this tutorial you will see how to quickly setup gradient accumulation and perform it with the utilities provided in ๐Ÿค— Accelerate, which can total to adding just one new line of code! This example will use a very simplistic PyTorch training loop that performs gradient accumulation every two batches: ```python device = "cuda" model.to(device) gradient_accumulation_steps = 2 for index, batch in enumerate(training_dataloader): inputs, targets = batch inputs = inputs.to(device) targets = targets.to(device) outputs = model(inputs) loss = loss_function(outputs, targets) loss = loss / gradient_accumulation_steps loss.backward() if (index + 1) % gradient_accumulation_steps == 0: optimizer.step() scheduler.step() optimizer.zero_grad() ``` ## Converting it to ๐Ÿค— Accelerate First the code shown earlier will be converted to utilize ๐Ÿค— Accelerate without the special gradient accumulation helper: ```diff + from accelerate import Accelerator + accelerator = Accelerator() + model, optimizer, training_dataloader, scheduler = accelerator.prepare( + model, optimizer, training_dataloader, scheduler + ) for index, batch in enumerate(training_dataloader): inputs, targets = batch - inputs = inputs.to(device) - targets = targets.to(device) outputs = model(inputs) loss = loss_function(outputs, targets) loss = loss / gradient_accumulation_steps + accelerator.backward(loss) if (index+1) % gradient_accumulation_steps == 0: optimizer.step() scheduler.step() optimizer.zero_grad() ``` <Tip warning={true}> In its current state, this code is not going to perform gradient accumulation efficiently due to a process called gradient synchronization. Read more about that in the [Concepts tutorial](../concept_guides/gradient_synchronization)! </Tip> ## Letting ๐Ÿค— Accelerate handle gradient accumulation All that is left now is to let ๐Ÿค— Accelerate handle the gradient accumulation for us. 
To do so, you should pass in a `gradient_accumulation_steps` parameter to [`Accelerator`], dictating the number of steps to perform before each call to `step()` and how to automatically adjust the loss during the call to [`~Accelerator.backward`]: ```diff from accelerate import Accelerator - accelerator = Accelerator() + accelerator = Accelerator(gradient_accumulation_steps=2) ``` Alternatively, you can pass in a `gradient_accumulation_plugin` parameter to the [`Accelerator`] object's `__init__`, which will allow you to further customize the gradient accumulation behavior. Read more about that in the [GradientAccumulationPlugin](../package_reference/accelerator#accelerate.utils.GradientAccumulationPlugin) docs. From here you can use the [`~Accelerator.accumulate`] context manager from inside your training loop to automatically perform the gradient accumulation for you! You just wrap it around the entire training part of your code: ```diff - for index, batch in enumerate(training_dataloader): + for batch in training_dataloader: + with accelerator.accumulate(model): inputs, targets = batch outputs = model(inputs) ``` You can remove all the special checks for the step number and the loss adjustment: ```diff - loss = loss / gradient_accumulation_steps accelerator.backward(loss) - if (index+1) % gradient_accumulation_steps == 0: optimizer.step() scheduler.step() optimizer.zero_grad() ``` As you can see, the [`Accelerator`] is able to keep track of the batch number you are on, and it will automatically know whether to step through the prepared optimizer and how to adjust the loss. <Tip> Typically with gradient accumulation, you would need to adjust the number of steps to reflect the change in total batches you are training on. 🤗 Accelerate automagically does this for you by default. Behind the scenes we instantiate a [`GradientAccumulationPlugin`] configured to do this. </Tip> <Tip warning={true}> The [`state.GradientState`] is sync'd with the active dataloader being iterated upon. As such, it naively assumes that when the end of the dataloader has been reached, everything will sync and a step will be performed. To disable this, set `sync_with_dataloader` to be `False` in the [`GradientAccumulationPlugin`]: ```python from accelerate import Accelerator from accelerate.utils import GradientAccumulationPlugin plugin = GradientAccumulationPlugin(sync_with_dataloader=False) accelerator = Accelerator(..., gradient_accumulation_plugin=plugin) ``` </Tip> ## The finished code Below is the finished implementation for performing gradient accumulation with 🤗 Accelerate: ```python from accelerate import Accelerator accelerator = Accelerator(gradient_accumulation_steps=2) model, optimizer, training_dataloader, scheduler = accelerator.prepare( model, optimizer, training_dataloader, scheduler ) for batch in training_dataloader: with accelerator.accumulate(model): inputs, targets = batch outputs = model(inputs) loss = loss_function(outputs, targets) accelerator.backward(loss) optimizer.step() scheduler.step() optimizer.zero_grad() ``` <Tip warning={true}> It's important that **only one forward/backward** should be done inside the context manager `with accelerator.accumulate(model)`.
</Tip> To learn more about what magic this wraps around, read the [Gradient Synchronization concept guide](../concept_guides/gradient_synchronization) ## Self-contained example Here is a self-contained example that you can run to see gradient accumulation in action with ๐Ÿค— Accelerate: ```python import torch import copy from accelerate import Accelerator from accelerate.utils import set_seed from torch.utils.data import TensorDataset, DataLoader # seed set_seed(0) # define toy inputs and labels x = torch.tensor([1., 2., 3., 4., 5., 6., 7., 8.]) y = torch.tensor([2., 4., 6., 8., 10., 12., 14., 16.]) gradient_accumulation_steps = 4 batch_size = len(x) // gradient_accumulation_steps # define dataset and dataloader dataset = TensorDataset(x, y) dataloader = DataLoader(dataset, batch_size=batch_size) # define model, optimizer and loss function model = torch.zeros((1, 1), requires_grad=True) model_clone = copy.deepcopy(model) criterion = torch.nn.MSELoss() model_optimizer = torch.optim.SGD([model], lr=0.02) accelerator = Accelerator(gradient_accumulation_steps=gradient_accumulation_steps) model, model_optimizer, dataloader = accelerator.prepare(model, model_optimizer, dataloader) model_clone_optimizer = torch.optim.SGD([model_clone], lr=0.02) print(f"initial model weight is {model.mean().item():.5f}") print(f"initial model weight is {model_clone.mean().item():.5f}") for i, (inputs, labels) in enumerate(dataloader): with accelerator.accumulate(model): inputs = inputs.view(-1, 1) print(i, inputs.flatten()) labels = labels.view(-1, 1) outputs = inputs @ model loss = criterion(outputs, labels) accelerator.backward(loss) model_optimizer.step() model_optimizer.zero_grad() loss = criterion(x.view(-1, 1) @ model_clone, y.view(-1, 1)) model_clone_optimizer.zero_grad() loss.backward() model_clone_optimizer.step() print(f"w/ accumulation, the final model weight is {model.mean().item():.5f}") print(f"w/o accumulation, the final model weight is {model_clone.mean().item():.5f}") ``` ``` initial model weight is 0.00000 initial model weight is 0.00000 0 tensor([1., 2.]) 1 tensor([3., 4.]) 2 tensor([5., 6.]) 3 tensor([7., 8.]) w/ accumulation, the final model weight is 2.04000 w/o accumulation, the final model weight is 2.04000 ```
accelerate/docs/source/usage_guides/gradient_accumulation.md/0
{ "file_path": "accelerate/docs/source/usage_guides/gradient_accumulation.md", "repo_id": "accelerate", "token_count": 2733 }
5
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## MAX_GPU_BATCH_SIZE = 16 EVAL_BATCH_SIZE = 32 # New Code # # We need a different `get_dataloaders` function that will build dataloaders by index def get_fold_dataloaders( accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16 ): """ Gets a set of train, valid, and test dataloaders for a particular fold Args: accelerator (`Accelerator`): The main `Accelerator` object train_idxs (list of `int`): The split indices for the training dataset valid_idxs (list of `int`): The split indices for the validation dataset batch_size (`int`): The size of the minibatch. 
Default is 16 """ tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") datasets = DatasetDict( { "train": dataset["train"].select(train_idxs), "validation": dataset["train"].select(valid_idxs), "test": dataset["validation"], } ) def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": pad_to_multiple_of = 16 elif accelerator.mixed_precision != "no": pad_to_multiple_of = 8 else: pad_to_multiple_of = None return tokenizer.pad( examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) # Instantiate dataloaders. train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) test_dataloader = DataLoader( tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) return train_dataloader, eval_dataloader, test_dataloader def training_function(config, args): # New Code # test_predictions = [] # Download the dataset datasets = load_dataset("glue", "mrpc") # Create our splits kfold = StratifiedKFold(n_splits=int(args.num_folds)) # Initialize accelerator accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) metric = evaluate.load("glue", "mrpc") # If the batch size is too big we use gradient accumulation gradient_accumulation_steps = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA: gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE batch_size = MAX_GPU_BATCH_SIZE set_seed(seed) # New Code # # Create our folds: folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"]) test_references = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(folds): train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders( accelerator, datasets, train_idxs, valid_idxs, ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). 
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). model = model.to(accelerator.device) # Instantiate optimizer optimizer = AdamW(params=model.parameters(), lr=lr) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Now we train the model for epoch in range(num_epochs): model.train() for step, batch in enumerate(train_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) outputs = model(**batch) loss = outputs.loss loss = loss / gradient_accumulation_steps accelerator.backward(loss) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", eval_metric) # New Code # # We also run predictions on the test set at the very end fold_predictions = [] for step, batch in enumerate(test_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) fold_predictions.append(predictions.cpu()) if i == 0: # We need all of the test predictions test_references.append(references.cpu()) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(fold_predictions, dim=0)) # We now need to release all our memory and get rid of the current model, optimizer, etc model, optimizer = accelerator.free_memory(model, optimizer) # New Code # # Finally we check the accuracy of our folded results: test_references = torch.cat(test_references, dim=0) preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1) test_metric = metric.compute(predictions=preds, references=test_references) accelerator.print("Average test metrics from all folds:", test_metric) accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." 
"and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") # New Code # parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset") args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(config, args) if __name__ == "__main__": main()
accelerate/examples/by_feature/cross_validation.py/0
{ "file_path": "accelerate/examples/by_feature/cross_validation.py", "repo_id": "accelerate", "token_count": 4478 }
6
# Similar to FSDP, we set the distributed type to DEEPSPEED
distributed_type: DEEPSPEED
# With DeepSpeed, we use a DeepSpeed config file for the entire configuration
deepspeed_config:
  # Can also be any of the JSON config templates in accelerate/examples/deepspeed_config_templates
  deepspeed_config_file: ../deepspeed_config_templates/zero_stage1_config.json
  # If using ZeRO-3 and wanting to load big models in, this should be set to `true` so
  # `transformers` uses the right `init` function
  zero3_init_flag: false  # or `true` when using ZeRO-3
# Finally we need to specify the number of GPUs to use
num_processes: 2
# Optionally, we can set the mixed precision here instead of in the DeepSpeed config file;
# however, this requires the `fp16` and `bf16` options to be set to `auto` in the DeepSpeed config file
# mixed_precision: "bf16"
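# A hypothetical launch sketch using this template (the training script name is just a placeholder):
#   accelerate launch --config_file deepspeed.yaml my_training_script.py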
accelerate/examples/config_yaml_templates/deepspeed.yaml/0
{ "file_path": "accelerate/examples/config_yaml_templates/deepspeed.yaml", "repo_id": "accelerate", "token_count": 239 }
7
# Distributed inference examples with PiPPy

This repo contains a variety of tutorials for using the [PiPPy](https://github.com/PyTorch/PiPPy) pipeline parallelism library with accelerate. You will find examples covering:

1. How to trace the model using `accelerate.prepare_pippy`
2. How to specify inputs based on what the model expects (when to use `kwargs`, `args`, and such)
3. How to gather the results at the end.

## Installation

This requires accelerate `main` or version 0.27.0 or later, `torchpippy` 0.2.0 or later, and Python 3.9 or later. Please install using `pip install .` to pull from the `setup.py` in this repo, or run manually:

```bash
pip install 'accelerate>=0.27.0' 'torchpippy>=0.2.0'
```

## Running code

You can use either `torchrun` or the recommended `accelerate launch` (no `accelerate config` needed) on each script:

```bash
accelerate launch bert.py
```

Or:

```bash
accelerate launch --num_processes {NUM_GPUS} bert.py
```

Or:

```bash
torchrun --nproc-per-node {NUM_GPUS} bert.py
```

## General speedups

PiPPy can be expected to outperform native model parallelism by a multiplicative factor, since all GPUs process inputs at all times rather than one input passing through a single GPU at a time while the others wait for the prior one to finish.

Below are some benchmarks we have measured with the accelerate-pippy integration for a few models, running on two RTX 4090s:

### Bert

|                      | Accelerate/Sequential | PiPPy + Accelerate |
|----------------------|-----------------------|--------------------|
| First batch          | 0.2137s               | 0.3119s            |
| Average of 5 batches | 0.0099s               | **0.0062s**        |

### GPT2

|                      | Accelerate/Sequential | PiPPy + Accelerate |
|----------------------|-----------------------|--------------------|
| First batch          | 0.1959s               | 0.4189s            |
| Average of 5 batches | 0.0205s               | **0.0126s**        |

### T5

|                      | Accelerate/Sequential | PiPPy + Accelerate |
|----------------------|-----------------------|--------------------|
| First batch          | 0.2789s               | 0.3809s            |
| Average of 5 batches | 0.0198s               | **0.0166s**        |
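## Minimal end-to-end sketch

The snippet below is a rough sketch of the pattern the scripts in this folder follow (see `bert.py` for the exact, tested version; argument names such as `example_kwargs` should be double-checked against the `prepare_pippy` signature of your installed accelerate version):

```python
import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer

from accelerate import PartialState, prepare_pippy

model = AutoModelForMaskedLM.from_pretrained("bert-base-cased")
model.eval()
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
inputs = tokenizer("Paris is the [MASK] of France.", return_tensors="pt")

# Trace and split the model across the available GPUs, using the example inputs to record shapes
model = prepare_pippy(model, split_points="auto", example_kwargs=inputs)

# Every rank runs the forward pass
with torch.no_grad():
    output = model(**inputs)

# Outputs are only materialized on the last pipeline stage, so inspect them there
if PartialState().is_last_process:
    print(output)
```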
accelerate/examples/inference/pippy/README.md/0
{ "file_path": "accelerate/examples/inference/pippy/README.md", "repo_id": "accelerate", "token_count": 646 }
8
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from manim import * class Stage3(Scene): def construct(self): mem = Rectangle(height=0.5,width=0.5) meta_mem = Rectangle(height=0.25,width=0.25) fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0) cpu_left_col_base = [mem.copy() for i in range(6)] cpu_right_col_base = [mem.copy() for i in range(6)] cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0) cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0) cpu_rects = VGroup(cpu_left_col,cpu_right_col).arrange(RIGHT, buff=0) cpu_text = Text("CPU", font_size=24) cpu = Group(cpu_rects,cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) cpu.move_to([-2.5,-.5,0]) self.add(cpu) gpu_base = [mem.copy() for i in range(4)] gpu_rect = VGroup(*gpu_base).arrange(UP,buff=0) gpu_text = Text("GPU", font_size=24) gpu = Group(gpu_rect,gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) gpu.move_to([-1,-1,0]) self.add(gpu) model_base = [mem.copy() for i in range(6)] model_rect = VGroup(*model_base).arrange(RIGHT,buff=0) model_text = Text("Model", font_size=24) model = Group(model_rect,model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) model.move_to([3, -1., 0]) self.add(model) model_arr = [] model_cpu_arr = [] model_meta_arr = [] for i,rect in enumerate(model_base): rect.set_stroke(YELLOW) cpu_target = Rectangle(height=0.46/4,width=0.46/3).set_stroke(width=0.).set_fill(YELLOW, opacity=0.7) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN+LEFT), buff=0.02, direction=UP) cpu_target.set_x(cpu_target.get_x()+0.1) elif i == 3: cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.) else: cpu_target.next_to(model_cpu_arr[i-1], direction=RIGHT, buff=0.) 
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT,buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect,checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, .5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []

        for i,rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i+1])
            else:
                cpu_target.move_to(cpu_right_col_base[i-5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )

        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )

        blue_text.next_to(key_text, DOWN*2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_3 = MarkupText(
            f'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.',
            font_size=24
        )
        step_3.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col,disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects,disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.,-1.25,0])

        self.play(
            Write(step_3, run_time=3),
            Write(disk_text, run_time=1),
            Create(disk_rects, run_time=1)
        )

        animations = []
        for i,rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))

        self.play(*animations)

        self.play(FadeOut(step_3))

        step_4 = MarkupText(
            f'Then, the checkpoint is removed from memory\nthrough garbage collection.',
            font_size=24
        )
        step_4.move_to([2, 2, 0])

        self.play(
            Write(step_4, run_time=3)
        )

        self.play(
            FadeOut(checkpoint_rect, checkpoint_text, *ckpt_arr, *ckpt_cpu_arr),
        )

        self.wait()
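# Rendering sketch (not part of the original scene; the quality/preview flags are just an example):
#   manim -pql stage_3.py Stage3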
accelerate/manim_animations/big_model_inference/stage_3.py/0
{ "file_path": "accelerate/manim_animations/big_model_inference/stage_3.py", "repo_id": "accelerate", "token_count": 2891 }
9
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random from pathlib import Path from typing import List import numpy as np import torch from safetensors.torch import load_model from torch.cuda.amp import GradScaler from .utils import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_MODEL_NAME, SAFE_WEIGHTS_NAME, SAMPLER_NAME, SCALER_NAME, SCHEDULER_NAME, WEIGHTS_NAME, get_pretty_name, is_mlu_available, is_torch_xla_available, is_xpu_available, save, ) if is_torch_xla_available(): import torch_xla.core.xla_model as xm from .logging import get_logger from .state import PartialState logger = get_logger(__name__) def save_accelerator_state( output_dir: str, model_states: List[dict], optimizers: list, schedulers: list, dataloaders: list, process_index: int, step: int, scaler: GradScaler = None, save_on_each_node: bool = False, safe_serialization: bool = True, ): """ Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory. <Tip> If `safe_serialization` is `True`, models will be saved with `safetensors` while the rest are saved using native `pickle`. </Tip> Args: output_dir (`str` or `os.PathLike`): The name of the folder to save all relevant weights and states. model_states (`List[torch.nn.Module]`): A list of model states optimizers (`List[torch.optim.Optimizer]`): A list of optimizer instances schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`): A list of learning rate schedulers dataloaders (`List[torch.utils.data.DataLoader]`): A list of dataloader instances to save their sampler states process_index (`int`): The current process index in the Accelerator state step (`int`): The current step in the internal step tracker scaler (`torch.cuda.amp.GradScaler`, *optional*): An optional gradient scaler instance to save save_on_each_node (`bool`, *optional*): Whether to save on every node, or only the main node. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). 
""" output_dir = Path(output_dir) # Model states for i, state in enumerate(model_states): weights_name = WEIGHTS_NAME if not safe_serialization else SAFE_WEIGHTS_NAME if i > 0: weights_name = weights_name.replace(".", f"_{i}.") output_model_file = output_dir.joinpath(weights_name) save(state, output_model_file, save_on_each_node=save_on_each_node, safe_serialization=safe_serialization) logger.info(f"Model weights saved in {output_model_file}") # Optimizer states for i, opt in enumerate(optimizers): state = opt.state_dict() optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin" output_optimizer_file = output_dir.joinpath(optimizer_name) save(state, output_optimizer_file, save_on_each_node=save_on_each_node, safe_serialization=False) logger.info(f"Optimizer state saved in {output_optimizer_file}") # Scheduler states for i, scheduler in enumerate(schedulers): state = scheduler.state_dict() scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin" output_scheduler_file = output_dir.joinpath(scheduler_name) save(state, output_scheduler_file, save_on_each_node=save_on_each_node, safe_serialization=False) logger.info(f"Scheduler state saved in {output_scheduler_file}") # DataLoader states for i, dataloader in enumerate(dataloaders): sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin" output_sampler_file = output_dir.joinpath(sampler_name) # Only save if we have our custom sampler from .data_loader import IterableDatasetShard, SeedableRandomSampler if isinstance(dataloader.dataset, IterableDatasetShard): sampler = dataloader.get_sampler() if isinstance(sampler, SeedableRandomSampler): save(sampler, output_sampler_file, save_on_each_node=save_on_each_node, safe_serialization=False) if getattr(dataloader, "use_stateful_dataloader", False): dataloader_state_dict_name = "dl_state_dict.bin" if i == 0 else f"dl_state_dict_{i}.bin" output_dataloader_state_dict_file = output_dir.joinpath(dataloader_state_dict_name) state_dict = dataloader.state_dict() torch.save(state_dict, output_dataloader_state_dict_file) logger.info(f"Sampler state for dataloader {i} saved in {output_sampler_file}") # GradScaler state if scaler is not None: state = scaler.state_dict() output_scaler_file = output_dir.joinpath(SCALER_NAME) torch.save(state, output_scaler_file) logger.info(f"Gradient scaler state saved in {output_scaler_file}") # Random number generator states states = {} states_name = f"{RNG_STATE_NAME}_{process_index}.pkl" states["step"] = step states["random_state"] = random.getstate() states["numpy_random_seed"] = np.random.get_state() states["torch_manual_seed"] = torch.get_rng_state() if is_xpu_available(): states["torch_xpu_manual_seed"] = torch.xpu.get_rng_state_all() if is_mlu_available(): states["torch_mlu_manual_seed"] = torch.mlu.get_rng_state_all() else: states["torch_cuda_manual_seed"] = torch.cuda.get_rng_state_all() if is_torch_xla_available(): states["xm_seed"] = xm.get_rng_state() output_states_file = output_dir.joinpath(states_name) torch.save(states, output_states_file) logger.info(f"Random states saved in {output_states_file}") return output_dir def load_accelerator_state( input_dir, models, optimizers, schedulers, dataloaders, process_index, scaler=None, map_location=None, **load_model_func_kwargs, ): """ Loads states of the models, optimizers, scaler, and RNG generators from a given directory. Args: input_dir (`str` or `os.PathLike`): The name of the folder to load all relevant weights and states. 
models (`List[torch.nn.Module]`): A list of model instances optimizers (`List[torch.optim.Optimizer]`): A list of optimizer instances schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`): A list of learning rate schedulers process_index (`int`): The current process index in the Accelerator state scaler (`torch.cuda.amp.GradScaler`, *optional*): An optional *GradScaler* instance to load map_location (`str`, *optional*): What device to load the optimizer state onto. Should be one of either "cpu" or "on_device". load_model_func_kwargs (`dict`, *optional*): Additional arguments that can be passed to the model's `load_state_dict` method. Returns: `dict`: Contains the `Accelerator` attributes to override while loading the state. """ # stores the `Accelerator` attributes to override override_attributes = dict() if map_location not in [None, "cpu", "on_device"]: raise TypeError( "Unsupported optimizer map location passed, please choose one of `None`, `'cpu'`, or `'on_device'`" ) if map_location is None: map_location = "cpu" elif map_location == "on_device": map_location = PartialState().device input_dir = Path(input_dir) # Model states for i, model in enumerate(models): ending = f"_{i}" if i > 0 else "" input_model_file = input_dir.joinpath(f"{SAFE_MODEL_NAME}{ending}.safetensors") if input_model_file.exists(): load_model(model, input_model_file, device=str(map_location), **load_model_func_kwargs) else: # Load with torch input_model_file = input_dir.joinpath(f"{MODEL_NAME}{ending}.bin") state_dict = torch.load(input_model_file, map_location=map_location) model.load_state_dict(state_dict, **load_model_func_kwargs) logger.info("All model weights loaded successfully") # Optimizer states for i, opt in enumerate(optimizers): optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin" input_optimizer_file = input_dir.joinpath(optimizer_name) optimizer_state = torch.load(input_optimizer_file, map_location=map_location) optimizers[i].load_state_dict(optimizer_state) logger.info("All optimizer states loaded successfully") # Scheduler states for i, scheduler in enumerate(schedulers): scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin" input_scheduler_file = input_dir.joinpath(scheduler_name) scheduler.load_state_dict(torch.load(input_scheduler_file)) logger.info("All scheduler states loaded successfully") for i, dataloader in enumerate(dataloaders): sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin" input_sampler_file = input_dir.joinpath(sampler_name) # Only load if we have our custom sampler from .data_loader import IterableDatasetShard, SeedableRandomSampler if isinstance(dataloader.dataset, IterableDatasetShard): sampler = dataloader.get_sampler() if isinstance(sampler, SeedableRandomSampler): sampler = dataloader.set_sampler(torch.load(input_sampler_file)) if getattr(dataloader, "use_stateful_dataloader", False): dataloader_state_dict_name = "dl_state_dict.bin" if i == 0 else f"dl_state_dict_{i}.bin" input_dataloader_state_dict_file = input_dir.joinpath(dataloader_state_dict_name) if input_dataloader_state_dict_file.exists(): state_dict = torch.load(input_dataloader_state_dict_file) dataloader.load_state_dict(state_dict) logger.info("All dataloader sampler states loaded successfully") # GradScaler state if scaler is not None: input_scaler_file = input_dir.joinpath(SCALER_NAME) scaler.load_state_dict(torch.load(input_scaler_file)) logger.info("GradScaler state loaded successfully") # Random states try: states = 
torch.load(input_dir.joinpath(f"{RNG_STATE_NAME}_{process_index}.pkl")) if "step" in states: override_attributes["step"] = states["step"] random.setstate(states["random_state"]) np.random.set_state(states["numpy_random_seed"]) torch.set_rng_state(states["torch_manual_seed"]) if is_xpu_available(): torch.xpu.set_rng_state_all(states["torch_xpu_manual_seed"]) if is_mlu_available(): torch.mlu.set_rng_state_all(states["torch_mlu_manual_seed"]) else: torch.cuda.set_rng_state_all(states["torch_cuda_manual_seed"]) if is_torch_xla_available(): xm.set_rng_state(states["xm_seed"]) logger.info("All random states loaded successfully") except Exception: logger.info("Could not load random states") return override_attributes def save_custom_state(obj, path, index: int = 0, save_on_each_node: bool = False): """ Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl` """ # Should this be the right way to get a qual_name type value from `obj`? save_location = Path(path) / f"custom_checkpoint_{index}.pkl" logger.info(f"Saving the state of {get_pretty_name(obj)} to {save_location}") save(obj.state_dict(), save_location, save_on_each_node=save_on_each_node) def load_custom_state(obj, path, index: int = 0): """ Loads the state of `obj` at `{path}/custom_checkpoint_{index}.pkl` """ load_location = f"{path}/custom_checkpoint_{index}.pkl" logger.info(f"Loading the state of {get_pretty_name(obj)} from {load_location}") obj.load_state_dict(torch.load(load_location, map_location="cpu"))
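# ------------------------------------------------------------------------------------------------
# Usage sketch (not part of the module): these helpers normally run behind `Accelerator.save_state`
# and `Accelerator.load_state` rather than being called directly. Assuming prepared `model` and
# `optimizer` objects and a writable `./checkpoint` directory:
#
#   from accelerate import Accelerator
#
#   accelerator = Accelerator()
#   model, optimizer = accelerator.prepare(model, optimizer)
#   accelerator.save_state("./checkpoint")  # writes model/optimizer/scheduler/sampler/RNG states
#   ...
#   accelerator.load_state("./checkpoint")  # restores them, including the RNG states saved above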
accelerate/src/accelerate/checkpointing.py/0
{ "file_path": "accelerate/src/accelerate/checkpointing.py", "repo_id": "accelerate", "token_count": 5333 }
10
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that will only step when the optimizer(s) have a training step. Useful
    to avoid stepping the scheduler too fast when gradients overflowed and the optimizer therefore skipped the
    training step (in mixed precision training).

    When performing gradient accumulation, scheduler lengths should not be changed accordingly; Accelerate will always
    step the scheduler to account for it.

    Args:
        scheduler (`torch.optim.lr_scheduler._LRScheduler`):
            The scheduler to wrap.
        optimizers (one or a list of `torch.optim.Optimizer`):
            The optimizers used.
        step_with_optimizer (`bool`, *optional*, defaults to `True`):
            Whether or not the scheduler should be stepped at each optimizer step.
        split_batches (`bool`, *optional*, defaults to `False`):
            Whether or not the dataloaders split one batch across the different processes (so batch size is the same
            regardless of the number of processes) or create batches on each process (so batch size is the original
            batch size multiplied by the number of processes).
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*args, **kwargs) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step num_processes = AcceleratorState().num_processes for _ in range(num_processes): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler, "total_steps"): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*args, **kwargs) else: self.scheduler.step(*args, **kwargs) # Passthroughs def get_last_lr(self): return self.scheduler.get_last_lr() def state_dict(self): return self.scheduler.state_dict() def load_state_dict(self, state_dict): self.scheduler.load_state_dict(state_dict) def get_lr(self): return self.scheduler.get_lr() def print_lr(self, *args, **kwargs): return self.scheduler.print_lr(*args, **kwargs)
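# ------------------------------------------------------------------------------------------------
# Usage sketch (not part of the module): the wrapper is created by `Accelerator.prepare`, so user
# code keeps calling `scheduler.step()` on every iteration and the wrapper skips the step whenever
# the optimizer step was skipped (e.g. gradient overflow in mixed precision) or gradients are still
# being accumulated. Variable names below are placeholders.
#
#   model, optimizer, dataloader, scheduler = accelerator.prepare(model, optimizer, dataloader, scheduler)
#   for batch in dataloader:
#       loss = model(**batch).loss
#       accelerator.backward(loss)
#       optimizer.step()
#       scheduler.step()  # an AcceleratedScheduler instance after `prepare`
#       optimizer.zero_grad()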
accelerate/src/accelerate/scheduler.py/0
{ "file_path": "accelerate/src/accelerate/scheduler.py", "repo_id": "accelerate", "token_count": 1577 }
11
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Test file to ensure that in general certain situational setups for notebooks work. """ import os import time from multiprocessing import Queue from pytest import mark, raises from torch.distributed.elastic.multiprocessing.errors import ChildFailedError from accelerate import PartialState, notebook_launcher from accelerate.test_utils import require_bnb from accelerate.utils import is_bnb_available def basic_function(): # Just prints the PartialState print(f"PartialState:\n{PartialState()}") def tough_nut_function(queue: Queue): if queue.empty(): return trial = queue.get() if trial > 0: queue.put(trial - 1) raise RuntimeError("The nut hasn't cracked yet! Try again.") print(f"PartialState:\n{PartialState()}") def bipolar_sleep_function(sleep_sec: int): state = PartialState() if state.process_index % 2 == 0: raise RuntimeError("I'm an even process. I don't like to sleep.") else: time.sleep(sleep_sec) NUM_PROCESSES = int(os.environ.get("ACCELERATE_NUM_PROCESSES", 1)) def test_can_initialize(): notebook_launcher(basic_function, (), num_processes=NUM_PROCESSES) @mark.skipif(NUM_PROCESSES < 2, reason="Need at least 2 processes to test static rendezvous backends") def test_static_rdzv_backend(): notebook_launcher(basic_function, (), num_processes=NUM_PROCESSES, rdzv_backend="static") @mark.skipif(NUM_PROCESSES < 2, reason="Need at least 2 processes to test c10d rendezvous backends") def test_c10d_rdzv_backend(): notebook_launcher(basic_function, (), num_processes=NUM_PROCESSES, rdzv_backend="c10d") @mark.skipif(NUM_PROCESSES < 2, reason="Need at least 2 processes to test fault tolerance") def test_fault_tolerant(max_restarts: int = 3): queue = Queue() queue.put(max_restarts) notebook_launcher(tough_nut_function, (queue,), num_processes=NUM_PROCESSES, max_restarts=max_restarts) @mark.skipif(NUM_PROCESSES < 2, reason="Need at least 2 processes to test monitoring") def test_monitoring(monitor_interval: float = 0.01, sleep_sec: int = 100): start_time = time.time() with raises(ChildFailedError, match="I'm an even process. I don't like to sleep."): notebook_launcher( bipolar_sleep_function, (sleep_sec,), num_processes=NUM_PROCESSES, monitor_interval=monitor_interval, ) assert time.time() - start_time < sleep_sec, "Monitoring did not stop the process in time." 
@require_bnb def test_problematic_imports(): with raises(RuntimeError, match="Please keep these imports"): import bitsandbytes as bnb # noqa: F401 notebook_launcher(basic_function, (), num_processes=NUM_PROCESSES) def main(): print("Test basic notebook can be ran") test_can_initialize() print("Test static rendezvous backend") test_static_rdzv_backend() print("Test c10d rendezvous backend") test_c10d_rdzv_backend() print("Test fault tolerant") test_fault_tolerant() print("Test monitoring") test_monitoring() if is_bnb_available(): print("Test problematic imports (bnb)") test_problematic_imports() if NUM_PROCESSES > 1: PartialState().destroy_process_group() if __name__ == "__main__": main()
accelerate/src/accelerate/test_utils/scripts/test_notebook.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/scripts/test_notebook.py", "repo_id": "accelerate", "token_count": 1371 }
12
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import math import os from abc import ABC from functools import partial import torch import torch.nn.functional as F from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler from .imports import is_megatron_lm_available from .operations import recursively_apply, send_to_device if is_megatron_lm_available(): from megatron import ( get_args, get_num_microbatches, get_tensorboard_writer, get_tokenizer, print_rank_last, ) from megatron.arguments import ( _add_data_args, _add_validation_args, core_transformer_config_from_args, parse_args, validate_args, ) from megatron.checkpointing import load_args_from_checkpoint, load_checkpoint, save_checkpoint from megatron.core import mpu, tensor_parallel from megatron.core.distributed import DistributedDataParallel as LocalDDP from megatron.core.distributed import finalize_model_grads from megatron.core.enums import ModelType from megatron.core.parallel_state import get_tensor_model_parallel_group, get_tensor_model_parallel_src_rank from megatron.core.pipeline_parallel import get_forward_backward_func from megatron.core.utils import get_model_config from megatron.data.dataset_utils import build_train_valid_test_datasets from megatron.global_vars import set_global_variables from megatron.initialize import ( _compile_dependencies, _init_autoresume, _initialize_distributed, _set_random_seed, set_jit_fusion_options, write_args_to_tensorboard, ) from megatron.model import BertModel, Float16Module, GPTModel, T5Model from megatron.model.classification import Classification from megatron.optimizer import get_megatron_optimizer from megatron.text_generation.communication import broadcast_int_list, broadcast_tensor from megatron.text_generation.generation import ( beam_search_and_return_on_first_stage, generate_tokens_probs_and_return_on_first_stage, ) from megatron.tokenizer.tokenizer import _vocab_size_with_padding from megatron.training import ( build_train_valid_test_data_iterators, get_optimizer_param_scheduler, num_floating_point_operations, setup_model_and_optimizer, train_step, training_log, ) from megatron.utils import ( average_losses_across_data_parallel_group, calc_params_l2_norm, get_ltor_masks_and_position_ids, unwrap_model, ) # model utilities def model_provider_func(pre_process=True, post_process=True, add_encoder=True, add_decoder=True): """Build the model.""" args = get_args() mode = "pre-training" if args.pretraining_flag else "fine-tuning" if args.rank == 0: print(f"Building {args.model_type_name} model in the {mode} mode.") print( "The Megatron LM model weights are initialized at random in `accelerator.prepare`. " "Please use `accelerator.load_checkpoint` to load a pre-trained checkpoint matching the distributed setup." 
) config = core_transformer_config_from_args(args) if args.model_type_name == "bert": if args.pretraining_flag: num_tokentypes = 2 if args.bert_binary_head else 0 model = BertModel( config=config, num_tokentypes=num_tokentypes, add_binary_head=args.bert_binary_head, parallel_output=True, pre_process=pre_process, post_process=post_process, ) else: model = Classification( config=config, num_classes=args.num_labels, num_tokentypes=2, pre_process=pre_process, post_process=post_process, ) elif args.model_type_name == "gpt": model = GPTModel( config=config, num_tokentypes=0, parallel_output=True, pre_process=pre_process, post_process=post_process, ) elif args.model_type_name == "t5": model = T5Model( config=config, num_tokentypes=0, parallel_output=True, pre_process=pre_process, post_process=post_process, add_encoder=add_encoder, add_decoder=add_decoder, ) else: raise ValueError(f"Unsupported model type: {args.model_type_name}") return model def prepare_model_optimizer_scheduler(accelerator): accelerator.print("Preparing model optimizer scheduler") args = get_args() if accelerator.state.megatron_lm_plugin.custom_prepare_model_function is not None: if accelerator.state.megatron_lm_plugin.custom_model_provider_function is None: raise ValueError( "You must provide a `custom_model_provider_function` when using a `custom_prepare_model_function`." ) custom_model_provider_func = accelerator.state.megatron_lm_plugin.custom_model_provider_function model = accelerator.state.megatron_lm_plugin.custom_prepare_model_function(custom_model_provider_func) optimizer = prepare_optimizer(accelerator, model) scheduler = prepare_scheduler(accelerator, optimizer, scheduler=None) else: model_type = ModelType.encoder_or_decoder if args.model_type_name == "t5": model_type = ModelType.encoder_and_decoder model_provider_func_ = model_provider_func if accelerator.state.megatron_lm_plugin.custom_model_provider_function is not None: model_provider_func_ = accelerator.state.megatron_lm_plugin.custom_model_provider_function (model, optimizer, scheduler) = setup_model_and_optimizer( model_provider_func_, model_type, no_wd_decay_cond=args.no_wd_decay_cond, scale_lr_cond=args.scale_lr_cond, lr_mult=args.lr_mult, ) args.model_len = len(model) return model, optimizer, scheduler # dataloader utilities class MegatronLMDummyDataLoader: """ Dummy dataloader presents model parameters or param groups, this is primarily used to follow conventional training Args: **dataset_kwargs: Megatron data arguments. 
""" def __init__(self, **dataset_kwargs): parser = argparse.ArgumentParser() parser = _add_data_args(parser) parser = _add_validation_args(parser) data_args = parser.parse_known_args() self.dataset_args = vars(data_args[0]) self.dataset_args.update(dataset_kwargs) self.dataset_args["megatron_dataset_flag"] = True def set_megatron_data_args(self): args = get_args() for key, value in self.dataset_args.items(): old_value = getattr(args, key, "") if old_value != value: print( f"WARNING: MegatronLMDummyDataLoader overriding arguments for " f"{key}:{old_value} with {key}:{value}" ) setattr(args, key, value) def get_train_valid_test_datasets_provider(self, accelerator): def train_valid_test_datasets_provider(train_val_test_num_samples): """Build train, valid, and test datasets.""" args = get_args() dataset_args = { "data_prefix": args.data_path if isinstance(args.data_path, (list, tuple)) else [args.data_path], "splits_string": args.split, "train_valid_test_num_samples": train_val_test_num_samples, "seed": args.seed, } if args.model_type_name == "bert": dataset_args.update( { "max_seq_length": args.seq_length, "binary_head": args.bert_binary_head, } ) elif args.model_type_name == "gpt": dataset_args.update( { "max_seq_length": args.seq_length, } ) elif args.model_type_name == "t5": dataset_args.update( { "max_seq_length": args.encoder_seq_length, "max_seq_length_dec": args.decoder_seq_length, "dataset_type": "t5", } ) else: raise ValueError(f"Unsupported model type: {args.model_type_name}") train_ds, valid_ds, test_ds = build_train_valid_test_datasets(**dataset_args) return train_ds, valid_ds, test_ds if accelerator.state.megatron_lm_plugin.custom_megatron_datasets_provider_function is not None: return accelerator.state.megatron_lm_plugin.custom_megatron_datasets_provider_function try: args = get_args() # Use '--no-use-pep517 -e' to pip install nvidia's megatron from source if args.model_type_name == "bert": from pretrain_bert import train_valid_test_datasets_provider train_valid_test_datasets_provider.is_distributed = True return train_valid_test_datasets_provider elif args.model_type_name == "gpt": from pretrain_gpt import train_valid_test_datasets_provider train_valid_test_datasets_provider.is_distributed = True return train_valid_test_datasets_provider elif args.model_type_name == "t5": from pretrain_t5 import train_valid_test_datasets_provider train_valid_test_datasets_provider.is_distributed = True return train_valid_test_datasets_provider except ImportError: pass return train_valid_test_datasets_provider def build_train_valid_test_data_iterators(self, accelerator): args = get_args() train_valid_test_dataset_provider = self.get_train_valid_test_datasets_provider(accelerator) if args.virtual_pipeline_model_parallel_size is not None: train_data_iterator = [] valid_data_iterator = [] test_data_iterator = [] for i in range(getattr(args, "model_len", 0)): mpu.set_virtual_pipeline_model_parallel_rank(i) iterators = build_train_valid_test_data_iterators(train_valid_test_dataset_provider) train_data_iterator.append(iterators[0]) valid_data_iterator.append(iterators[1]) test_data_iterator.append(iterators[2]) else: train_data_iterator, valid_data_iterator, test_data_iterator = build_train_valid_test_data_iterators( train_valid_test_dataset_provider ) return train_data_iterator, valid_data_iterator, test_data_iterator def _handle_megatron_data_iterator(accelerator, data_iterator): class DummyMegatronDataloader: def __iter__(self): return self def __next__(self): return {} is_data_iterator_empty = 
data_iterator is None is_src_data_iterator_empty = torch.tensor(is_data_iterator_empty, dtype=torch.bool, device=accelerator.device) torch.distributed.broadcast( is_src_data_iterator_empty, get_tensor_model_parallel_src_rank(), group=get_tensor_model_parallel_group() ) if not is_src_data_iterator_empty and is_data_iterator_empty: return DummyMegatronDataloader() return data_iterator def prepare_data_loader(accelerator, dataloader): accelerator.print("Preparing dataloader") args = get_args() if not args.megatron_dataset_flag: from ..data_loader import _PYTORCH_DATALOADER_KWARGS, prepare_data_loader micro_batch_size = args.micro_batch_size * args.num_micro_batches kwargs = {k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) for k in _PYTORCH_DATALOADER_KWARGS} if kwargs["batch_size"] is None: if isinstance(kwargs["sampler"], torch.utils.data.BatchSampler): kwargs["sampler"].batch_size = micro_batch_size else: del kwargs["sampler"] del kwargs["shuffle"] del kwargs["batch_size"] kwargs["batch_sampler"].batch_size = micro_batch_size else: del kwargs["batch_sampler"] kwargs["batch_size"] = micro_batch_size dataloader = torch.utils.data.DataLoader(dataloader.dataset, **kwargs) # split_batches: # Megatron only needs to fetch different data between different dp groups, # and does not need to split the data within the dp group. return prepare_data_loader( dataloader, accelerator.device, num_processes=mpu.get_data_parallel_world_size(), process_index=mpu.get_data_parallel_rank(), split_batches=False, put_on_device=True, rng_types=accelerator.rng_types.copy(), dispatch_batches=accelerator.dispatch_batches, ) else: if args.consumed_samples is not None: ( args.consumed_train_samples, args.consumed_valid_samples, args.consumed_test_samples, ) = args.consumed_samples else: args.consumed_train_samples, args.consumed_valid_samples, args.consumed_test_samples = 0, 0, 0 args.micro_batch_size = args.micro_batch_size * args.num_micro_batches # In order to be compatible with data in transform format, # it needs to increase the size of mbs first, # and then split the large batch data into some mbs. ( train_data_iterator, valid_data_iterator, test_data_iterator, ) = dataloader.build_train_valid_test_data_iterators(accelerator) args.micro_batch_size = args.micro_batch_size // args.num_micro_batches train_data_iterator = _handle_megatron_data_iterator( accelerator=accelerator, data_iterator=train_data_iterator ) valid_data_iterator = _handle_megatron_data_iterator( accelerator=accelerator, data_iterator=valid_data_iterator ) test_data_iterator = _handle_megatron_data_iterator(accelerator=accelerator, data_iterator=test_data_iterator) return train_data_iterator, valid_data_iterator, test_data_iterator # optimizer utilities class MegatronLMOptimizerWrapper(AcceleratedOptimizer): def __init__(self, optimizer): super().__init__(optimizer, device_placement=False, scaler=None) def zero_grad(self, set_to_none=None): pass # `model(**batch)` is doing that automatically. Therefore, it's implementation is not needed def step(self): pass # `model(**batch)` is doing that automatically. 
Therefore, it's implementation is not needed @property def step_was_skipped(self): """Whether or not the optimizer step was done, or skipped because of gradient overflow.""" return self.optimizer.skipped_iter def prepare_optimizer(accelerator, model): accelerator.print("Preparing optimizer") args = get_args() return get_megatron_optimizer(model, args.no_wd_decay_cond, args.scale_lr_cond, args.lr_mult) # scheduler utilities class MegatronLMDummyScheduler: """ Dummy scheduler presents model parameters or param groups, this is primarily used to follow conventional training loop when scheduler config is specified in the deepspeed config file. Args: optimizer (`torch.optim.optimizer.Optimizer`): The optimizer to wrap. total_num_steps (int): Total number of steps. warmup_num_steps (int): Number of steps for warmup. **kwargs (additional keyword arguments, *optional*): Other arguments. """ def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs): self.optimizer = optimizer self.total_num_steps = total_num_steps self.warmup_num_steps = warmup_num_steps self.kwargs = kwargs class MegatronLMSchedulerWrapper(AcceleratedScheduler): def __init__(self, scheduler, optimizers): super().__init__(scheduler, optimizers) def step(self, *args, **kwargs): return # `model(**batch)` is doing that automatically. Therefore, it's implementation is not needed def prepare_scheduler(accelerator, optimizer, scheduler): accelerator.print("Preparing scheduler") scheduler = get_optimizer_param_scheduler(optimizer) return scheduler class AbstractTrainStep(ABC): """Abstract class for batching, forward pass and loss handler.""" def __init__(self, name): super().__init__() self.name = name def get_batch_func(self, accelerator, megatron_dataset_flag): pass def get_forward_step_func(self): pass def get_loss_func(self, accelerator): pass class BertTrainStep(AbstractTrainStep): """ Bert train step class. Args: args (`argparse.Namespace`): Megatron-LM arguments. """ def __init__(self, accelerator, args): super().__init__("BertTrainStep") self.get_batch = self.get_batch_func(accelerator, args.megatron_dataset_flag) self.loss_func = self.get_loss_func(accelerator, args.pretraining_flag, args.num_labels) self.forward_step = self.get_forward_step_func(args.pretraining_flag, args.bert_binary_head) if not args.model_return_dict: self.model_output_class = None else: from transformers.modeling_outputs import SequenceClassifierOutput self.model_output_class = SequenceClassifierOutput def get_batch_func(self, accelerator, megatron_dataset_flag): def get_batch_megatron(data_iterator): """Build the batch.""" # Items and their type. keys = ["text", "types", "labels", "is_random", "loss_mask", "padding_mask"] datatype = torch.int64 # Broadcast data. if data_iterator is not None: data = next(data_iterator) else: data = None data_b = tensor_parallel.broadcast_data(keys, data, datatype) # Unpack. tokens = data_b["text"].long() types = data_b["types"].long() sentence_order = data_b["is_random"].long() loss_mask = data_b["loss_mask"].float() lm_labels = data_b["labels"].long() padding_mask = data_b["padding_mask"].long() return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask def get_batch_transformer(data_iterator): """Build the batch.""" data = next(data_iterator) data = send_to_device(data, torch.cuda.current_device()) # Unpack. 
tokens = data["input_ids"].long() padding_mask = data["attention_mask"].long() if "token_type_ids" in data: types = data["token_type_ids"].long() else: types = None if "labels" in data: lm_labels = data["labels"].long() loss_mask = (data["labels"] != -100).to(torch.float) else: lm_labels = None loss_mask = None if "next_sentence_label" in data: sentence_order = data["next_sentence_label"].long() else: sentence_order = None return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask if accelerator.state.megatron_lm_plugin.custom_get_batch_function is not None: return accelerator.state.megatron_lm_plugin.custom_get_batch_function if megatron_dataset_flag: try: # Use '--no-use-pep517 -e' to pip install nvidia's megatron from source from pretrain_bert import get_batch return get_batch except ImportError: pass return get_batch_megatron else: return get_batch_transformer def get_loss_func(self, accelerator, pretraining_flag, num_labels): def loss_func_pretrain(loss_mask, sentence_order, output_tensor): lm_loss_, sop_logits = output_tensor lm_loss_ = lm_loss_.float() loss_mask = loss_mask.float() lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum() if sop_logits is not None: sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(), sentence_order.view(-1), ignore_index=-1) sop_loss = sop_loss.float() loss = lm_loss + sop_loss averaged_losses = average_losses_across_data_parallel_group([lm_loss, sop_loss]) return loss, {"lm loss": averaged_losses[0], "sop loss": averaged_losses[1]} else: loss = lm_loss averaged_losses = average_losses_across_data_parallel_group([lm_loss]) return loss, {"lm loss": averaged_losses[0]} def loss_func_finetune(labels, logits): if num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) elif self.num_labels > 1 and (labels.dtype in (torch.long, torch.int)): loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, num_labels), labels.view(-1)) else: loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) averaged_losses = average_losses_across_data_parallel_group([loss]) return loss, {"loss": averaged_losses[0]} if accelerator.state.megatron_lm_plugin.custom_loss_function is not None: return accelerator.state.megatron_lm_plugin.custom_loss_function if pretraining_flag: return loss_func_pretrain else: return loss_func_finetune def get_forward_step_func(self, pretraining_flag, bert_binary_head): def forward_step(data_iterator, model): """Forward step.""" tokens, types, sentence_order, loss_mask, labels, padding_mask = self.get_batch(data_iterator) if not bert_binary_head: types = None # Forward pass through the model. if pretraining_flag: output_tensor = model(tokens, padding_mask, tokentype_ids=types, lm_labels=labels) return output_tensor, partial(self.loss_func, loss_mask, sentence_order) else: logits = model(tokens, padding_mask, tokentype_ids=types) return logits, partial(self.loss_func, labels) return forward_step class GPTTrainStep(AbstractTrainStep): """ GPT train step class. Args: args (`argparse.Namespace`): Megatron-LM arguments. 
""" def __init__(self, accelerator, args): super().__init__("GPTTrainStep") self.get_batch = self.get_batch_func(accelerator, args.megatron_dataset_flag) self.loss_func = self.get_loss_func(accelerator) self.forward_step = self.get_forward_step_func() self.eod_token = args.padded_vocab_size - 1 if args.vocab_file is not None: tokenizer = get_tokenizer() self.eod_token = tokenizer.eod self.reset_position_ids = args.reset_position_ids self.reset_attention_mask = args.reset_attention_mask self.eod_mask_loss = args.eod_mask_loss if not args.model_return_dict: self.model_output_class = None else: from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions self.model_output_class = CausalLMOutputWithCrossAttentions def get_batch_func(self, accelerator, megatron_dataset_flag): def get_batch_megatron(data_iterator): """Generate a batch""" # Items and their type. keys = ["text"] datatype = torch.int64 # Broadcast data. if data_iterator is not None: data = next(data_iterator) else: data = None data_b = tensor_parallel.broadcast_data(keys, data, datatype) # Unpack. tokens_ = data_b["text"].long() labels = tokens_[:, 1:].contiguous() tokens = tokens_[:, :-1].contiguous() # Get the masks and postition ids. attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids( tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, self.eod_mask_loss ) return tokens, labels, loss_mask, attention_mask, position_ids def get_batch_transformer(data_iterator): data = next(data_iterator) data = {"input_ids": data["input_ids"]} data = send_to_device(data, torch.cuda.current_device()) tokens_ = data["input_ids"].long() padding = torch.zeros((tokens_.shape[0], 1), dtype=tokens_.dtype, device=tokens_.device) + self.eod_token tokens_ = torch.concat([tokens_, padding], dim=1) labels = tokens_[:, 1:].contiguous() tokens = tokens_[:, :-1].contiguous() # Get the masks and postition ids. attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids( tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, True ) return tokens, labels, loss_mask, attention_mask, position_ids if accelerator.state.megatron_lm_plugin.custom_get_batch_function is not None: return accelerator.state.megatron_lm_plugin.custom_get_batch_function if megatron_dataset_flag: try: # Use '--no-use-pep517 -e' to pip install nvidia's megatron from source from pretrain_gpt import get_batch return get_batch except ImportError: pass return get_batch_megatron else: return get_batch_transformer def get_loss_func(self, accelerator): args = get_args() def loss_func(loss_mask, output_tensor): if args.return_logits: losses, logits = output_tensor else: losses = output_tensor losses = losses.float() loss_mask = loss_mask.view(-1).float() if args.context_parallel_size > 1: loss = torch.cat([torch.sum(losses.view(-1) * loss_mask).view(1), loss_mask.sum().view(1)]) torch.distributed.all_reduce(loss, group=mpu.get_context_parallel_group()) loss = loss[0] / loss[1] else: loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum() # Check individual rank losses are not NaN prior to DP all-reduce. if args.check_for_nan_in_loss_and_grad: global_rank = torch.distributed.get_rank() assert not loss.isnan(), ( f"Rank {global_rank}: found NaN in local forward loss calculation. " f"Device: {torch.cuda.current_device()}, node: {os.uname()[1]}" ) # Reduce loss for logging. 
averaged_loss = average_losses_across_data_parallel_group([loss]) output_dict = {"lm loss": averaged_loss[0]} if args.return_logits: output_dict.update({"logits": logits}) return loss, output_dict if accelerator.state.megatron_lm_plugin.custom_loss_function is not None: return accelerator.state.megatron_lm_plugin.custom_loss_function return loss_func def get_forward_step_func(self): def forward_step(data_iterator, model): """Forward step.""" # Get the batch. tokens, labels, loss_mask, attention_mask, position_ids = self.get_batch(data_iterator) output_tensor = model(tokens, position_ids, attention_mask, labels=labels) return output_tensor, partial(self.loss_func, loss_mask) return forward_step class T5TrainStep(AbstractTrainStep): """ T5 train step class. Args: args (`argparse.Namespace`): Megatron-LM arguments. """ def __init__(self, accelerator, args): super().__init__("T5TrainStep") self.get_batch = self.get_batch_func(accelerator, args.megatron_dataset_flag) self.loss_func = self.get_loss_func(accelerator) self.forward_step = self.get_forward_step_func() if not args.model_return_dict: self.model_output_class = None else: from transformers.modeling_outputs import Seq2SeqLMOutput self.model_output_class = Seq2SeqLMOutput @staticmethod def attn_mask_postprocess(attention_mask): # We create a 3D attention mask from a 2D tensor mask. # [b, 1, s] attention_mask_b1s = attention_mask.unsqueeze(1) # [b, s, 1] attention_mask_bs1 = attention_mask.unsqueeze(2) # [b, s, s] attention_mask_bss = attention_mask_b1s * attention_mask_bs1 # Convert attention mask to binary: extended_attention_mask = attention_mask_bss < 0.5 return extended_attention_mask @staticmethod def get_decoder_mask(seq_length, device): attention_mask = torch.tril(torch.ones((1, seq_length, seq_length), device=device)) attention_mask = attention_mask < 0.5 return attention_mask @staticmethod def get_enc_dec_mask(attention_mask, dec_seq_length, device): batch_size, _ = attention_mask.shape # We create a 3D attention mask from a 2D tensor mask. # [b, 1, s] attention_mask_b1s = attention_mask.unsqueeze(1) # [b, s, 1] attention_mask_bs1 = torch.ones((batch_size, dec_seq_length, 1), device=device) attention_mask_bss = attention_mask_bs1 * attention_mask_b1s extended_attention_mask = attention_mask_bss < 0.5 return extended_attention_mask def get_batch_func(self, accelerator, megatron_dataset_flag): def get_batch_megatron(data_iterator): """Build the batch.""" keys = ["text_enc", "text_dec", "labels", "loss_mask", "enc_mask", "dec_mask", "enc_dec_mask"] datatype = torch.int64 # Broadcast data. if data_iterator is not None: data = next(data_iterator) else: data = None data_b = tensor_parallel.broadcast_data(keys, data, datatype) # Unpack. 
tokens_enc = data_b["text_enc"].long() tokens_dec = data_b["text_dec"].long() labels = data_b["labels"].long() loss_mask = data_b["loss_mask"].float() enc_mask = data_b["enc_mask"] < 0.5 dec_mask = data_b["dec_mask"] < 0.5 enc_dec_mask = data_b["enc_dec_mask"] < 0.5 return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask def get_batch_transformer(data_iterator): """Build the batch.""" data = next(data_iterator) data = send_to_device(data, torch.cuda.current_device()) tokens_enc = data["input_ids"].long() labels = data["labels"].long() loss_mask = (labels != -100).to(torch.float) if "decoder_input_ids" in data: tokens_dec = data["decoder_input_ids"].long() else: tokens_dec = labels.new_zeros(labels.shape, device=labels.device, dtype=torch.long) tokens_dec[..., 1:] = labels[..., :-1].clone() tokens_dec[..., 0] = 0 tokens_dec.masked_fill_(tokens_dec == -100, 0) enc_mask = T5TrainStep.attn_mask_postprocess(data["attention_mask"].long()) dec_mask = T5TrainStep.get_decoder_mask(tokens_dec.shape[1], tokens_dec.device) enc_dec_mask = T5TrainStep.get_enc_dec_mask( data["attention_mask"].long(), tokens_dec.shape[1], tokens_dec.device ) return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask if accelerator.state.megatron_lm_plugin.custom_get_batch_function is not None: return accelerator.state.megatron_lm_plugin.custom_get_batch_function if megatron_dataset_flag: try: # Use '--no-use-pep517 -e' to pip install nvidia's megatron from source from pretrain_t5 import get_batch return get_batch except ImportError: pass return get_batch_megatron else: return get_batch_transformer def get_loss_func(self, accelerator): def loss_func(loss_mask, output_tensor): lm_loss_ = output_tensor.float() lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum() loss = lm_loss averaged_losses = average_losses_across_data_parallel_group([lm_loss]) return loss, {"lm loss": averaged_losses[0]} if accelerator.state.megatron_lm_plugin.custom_loss_function is not None: return accelerator.state.megatron_lm_plugin.custom_loss_function return loss_func def get_forward_step_func(self): def forward_step(data_iterator, model): """Forward step.""" # Get the batch. tokens_enc, tokens_dec, loss_mask, lm_labels, enc_mask, dec_mask, enc_dec_mask = self.get_batch( data_iterator ) # Forward model lm_labels output_tensor = model( tokens_enc, tokens_dec, enc_mask, dec_mask, enc_dec_mask, tokentype_ids=None, lm_labels=lm_labels ) return output_tensor, partial(self.loss_func, loss_mask) return forward_step def finish_mpu_init(): # torch.distributed initialization args = get_args() # Pytorch distributed. _initialize_distributed() # Random seeds for reproducibility. if args.rank == 0: print(f"> setting random seeds to {args.seed} ...") _set_random_seed(args.seed, args.data_parallel_random_init) # intialize megatron setup def initialize(accelerator, extra_args_provider=None, args_defaults={}): accelerator.print("Initializing Megatron-LM") assert torch.cuda.is_available(), "Megatron requires CUDA." 
# Parse arguments args = parse_args(extra_args_provider, ignore_unknown_args=True) # Set defaults for key, value in args_defaults.items(): if getattr(args, key, None) is not None: if args.rank == 0: print( f"WARNING: overriding default arguments for " f"{key}:{getattr(args, key)} with {key}:{value}", flush=True, ) setattr(args, key, value) if args.use_checkpoint_args or args_defaults.get("use_checkpoint_args", False): assert args.load is not None, "--use-checkpoints-args requires --load argument" load_args_from_checkpoint(args) validate_args(args) # set global args, build tokenizer, and set adlr-autoresume, # tensorboard-writer, and timers. set_global_variables(args) # Megatron's MPU is the master. Complete initialization right away. finish_mpu_init() # Autoresume. _init_autoresume() # Compile dependencies. _compile_dependencies() # Set pytorch JIT layer fusion options and warmup JIT functions. set_jit_fusion_options() args = get_args() if getattr(args, "padded_vocab_size", None) is None: args.padded_vocab_size = _vocab_size_with_padding(args.orig_vocab_size, args) if args.model_type_name == "bert" and args.pretraining_flag and args.num_labels == 2: args.bert_binary_head = True else: args.bert_binary_head = False args.iteration = 0 class MegatronEngine(torch.nn.Module): """ Megatron-LM model wrapper Args: accelerator (:class:`~accelerate.Accelerator`): The accelerator object to use. model: Megatron-LM model optimizer: Megatron-LM optimizer lr_scheduler: Megatron-LM lr scheduler """ def __init__(self, accelerator, model, optimizer, scheduler): super().__init__() self.module = model self.base_model = model[0] self.optimizer = optimizer self.scheduler = scheduler args = get_args() if accelerator.state.megatron_lm_plugin.custom_train_step_class is not None: self.train_step_handler = accelerator.state.megatron_lm_plugin.custom_train_step_class( args, **accelerator.state.megatron_lm_plugin.custom_train_step_kwargs ) elif args.model_type_name == "bert": self.train_step_handler = BertTrainStep(accelerator, args) elif args.model_type_name == "gpt": self.train_step_handler = GPTTrainStep(accelerator, args) elif args.model_type_name == "t5": self.train_step_handler = T5TrainStep(accelerator, args) else: raise ValueError(f"Unsupported model type: {args.model_type_name}") self.optimizer.skipped_iter = False # Tracking loss. 
self.total_loss_dict = {} self.eval_total_loss_dict = {} self.iteration = 0 self.report_memory_flag = True self.num_floating_point_operations_so_far = 0 self.module_config = None if args.tensorboard_dir is not None: write_args_to_tensorboard() def get_module_config(self): args = get_args() config = get_model_config(self.module[0]) # Setup some training config params config.grad_scale_func = self.optimizer.scale_loss if isinstance(self.module[0], LocalDDP) and args.overlap_grad_reduce: assert config.no_sync_func is None, ( "When overlap_grad_reduce is True, config.no_sync_func must be None; " "a custom no_sync_func is not supported when overlapping grad-reduce" ) config.no_sync_func = [model_chunk.no_sync for model_chunk in self.module] if len(self.module) == 1: config.no_sync_func = config.no_sync_func[0] if args.delay_grad_reduce: config.grad_sync_func = [model_chunk.start_grad_sync for model_chunk in self.module] if len(self.module) == 1: config.grad_sync_func = config.grad_sync_func[0] if args.overlap_param_gather and args.delay_param_gather: config.param_sync_func = [ lambda x: self.optimizer.finish_param_sync(model_index, x) for model_index in range(len(self.module)) ] if len(self.module) == 1: config.param_sync_func = config.param_sync_func[0] config.finalize_model_grads_func = finalize_model_grads return config def train(self): for model_module in self.module: model_module.train() if self.module_config is None: self.module_config = self.get_module_config() self.log_eval_results() def eval(self): for model_module in self.module: model_module.eval() if self.module_config is None: self.module_config = self.get_module_config() def get_batch_data_iterator(self, batch_data): args = get_args() data_chunks = [] if len(batch_data) > 0: if args.num_micro_batches > 1: for i in range(0, args.num_micro_batches): data_chunks.append( { k: v[i * args.micro_batch_size : (i + 1) * args.micro_batch_size] for k, v in batch_data.items() } ) else: data_chunks = [batch_data] if len(self.module) > 1: batch_data_iterator = ( [iter(data_chunks) for _ in range(len(self.module))] if len(batch_data) > 0 else [None] * len(self.module) ) else: batch_data_iterator = iter(data_chunks) if len(batch_data) > 0 else None return batch_data_iterator def train_step(self, **batch_data): """ Training step for Megatron-LM Args: batch_data (:obj:`dict`): The batch data to train on. """ batch_data_iterator = self.get_batch_data_iterator(batch_data) loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad = train_step( forward_step_func=self.train_step_handler.forward_step, data_iterator=batch_data_iterator, model=self.module, optimizer=self.optimizer, opt_param_scheduler=self.scheduler, config=self.module_config, ) self.optimizer.skipped_iter = skipped_iter == 1 return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad def eval_step(self, **batch_data): """ Evaluation step for Megatron-LM Args: batch_data (:obj:`dict`): The batch data to evaluate on. 
""" args = get_args() batch_data_iterator = self.get_batch_data_iterator(batch_data) forward_backward_func = get_forward_backward_func() loss_dicts = forward_backward_func( forward_step_func=self.train_step_handler.forward_step, data_iterator=batch_data_iterator, model=self.module, num_microbatches=get_num_microbatches(), seq_length=args.seq_length, micro_batch_size=args.micro_batch_size, forward_only=True, ) # Empty unused memory if args.empty_unused_memory_level >= 1: torch.cuda.empty_cache() args.consumed_valid_samples += ( mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches() ) if mpu.is_pipeline_last_stage(ignore_virtual=True): # Average loss across microbatches. loss_reduced = {} for key in loss_dicts[0]: losses_reduced_for_key = [x[key] for x in loss_dicts] if len(losses_reduced_for_key[0].shape) == 0: loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key) else: loss_reduced[key] = torch.concat(losses_reduced_for_key) return loss_reduced return {} def forward(self, **batch_data): # During training, we use train_step() # model(**batch_data) performs following operations by delegating it to `self.train_step`: # 1. Prepare **batch_data for Tendor, Pipeline and Model Parallelism # 2. Set grad to zero. # 3. forward pass and backward pass using Pipeline Parallelism # 4. Empty unused memory. # 5. Reduce gradients. # 6. Update parameters. # 7. Gather params when using Distributed Optimizer (Data Parallelism). # 8. Update learning rate if scheduler is specified. # 9. Empty unused memory. # 10. Average loss across microbatches and across DP ranks. # # During evaluation, we use eval_step() args = get_args() if self.module[0].training: loss_dict, skipped_iter, grad_norm, num_zeros_in_grad = self.train_step(**batch_data) self.iteration += 1 batch_size = mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches() args.consumed_train_samples += batch_size self.num_floating_point_operations_so_far += num_floating_point_operations(args, batch_size) if args.tensorboard_dir is not None: # Logging. 
loss_scale = self.optimizer.get_loss_scale().item() params_norm = None if args.log_params_norm: params_norm = calc_params_l2_norm(self.model) self.report_memory_flag = training_log( loss_dict, self.total_loss_dict, self.optimizer.param_groups[0]["lr"], self.iteration, loss_scale, self.report_memory_flag, skipped_iter, grad_norm, params_norm, num_zeros_in_grad, ) else: loss_dict = self.eval_step(**batch_data) if args.tensorboard_dir is not None: for key in loss_dict: self.eval_total_loss_dict[key] = ( self.eval_total_loss_dict.get(key, torch.cuda.FloatTensor([0.0])) + loss_dict[key] ) self.eval_total_loss_dict[key + "_num_iters"] = self.eval_total_loss_dict.get( key + "_num_iters", torch.cuda.FloatTensor([0.0]) ) + torch.cuda.FloatTensor([1.0]) loss = torch.tensor(0.0, device=torch.cuda.current_device()) for key in loss_dict: if len(loss_dict[key].shape) == 0: loss += loss_dict[key] logits = None if "logits" in loss_dict: logits = loss_dict["logits"] if self.train_step_handler.model_output_class is not None: return self.train_step_handler.model_output_class(loss=loss, logits=logits) return loss def log_eval_results(self): args = get_args() if args.tensorboard_dir is None or self.iteration == 0: return args = get_args() writer = get_tensorboard_writer() string = f"validation loss at iteration {self.iteration} | " for key in self.eval_total_loss_dict: if key.endswith("_num_iters"): continue value = self.eval_total_loss_dict[key] / self.eval_total_loss_dict[key + "_num_iters"] string += f"{key} value: {value} | " ppl = math.exp(min(20, value.item())) if args.pretraining_flag: string += f"{key} PPL: {ppl} | " if writer: writer.add_scalar(f"{key} validation", value.item(), self.iteration) if args.pretraining_flag: writer.add_scalar(f"{key} validation ppl", ppl, self.iteration) length = len(string) + 1 print_rank_last("-" * length) print_rank_last(string) print_rank_last("-" * length) self.eval_total_loss_dict = {} def save_checkpoint(self, output_dir): self.log_eval_results() args = get_args() args.save = output_dir torch.distributed.barrier() save_checkpoint( self.iteration, self.module, self.optimizer, self.scheduler, num_floating_point_operations_so_far=self.num_floating_point_operations_so_far, ) torch.distributed.barrier() def load_checkpoint(self, input_dir): args = get_args() args.load = input_dir args.consumed_train_samples = 0 args.consumed_valid_samples = 0 torch.distributed.barrier() iteration, num_floating_point_operations_so_far = load_checkpoint(self.module, self.optimizer, self.scheduler) torch.distributed.barrier() self.iteration = iteration self.num_floating_point_operations_so_far = num_floating_point_operations_so_far if args.fp16 and self.iteration == 0: self.optimizer.reload_model_params() def megatron_generate( self, inputs, attention_mask=None, max_length=None, max_new_tokens=None, num_beams=None, temperature=None, top_k=None, top_p=None, length_penalty=None, **kwargs, ): """ Generate method for GPT2 model. This method is used for inference. Supports both greedy and beam search along with sampling. Refer the Megatron-LM repo for more details Args: inputs (torch.Tensor): input ids attention_mask (torch.Tensor, optional): attention mask. Defaults to None. max_length (int, optional): max length of the generated sequence. Defaults to None. Either this or max_new_tokens should be provided. max_new_tokens (int, optional): max number of tokens to be generated. Defaults to None. Either this or max_length should be provided. 
num_beams (int, optional): number of beams to use for beam search. Defaults to None. temperature (float, optional): temperature for sampling. Defaults to 1.0. top_k (int, optional): top k tokens to consider for sampling. Defaults to 0.0. top_p (float, optional): tokens in top p probability are considered for sampling. Defaults to 0.0. length_penalty (float, optional): length penalty for beam search. Defaults to None. kwargs: additional key-value arguments """ # checking if required arguments are passed args = get_args() if args.model_type_name != "gpt": raise NotImplementedError("Generate method is not implemented for this model") if args.data_parallel_size > 1: raise ValueError("Generate method requires data parallelism to be 1") if args.sequence_parallel: raise ValueError("Generate method requires sequence parallelism to be False") if args.recompute_granularity is not None: raise ValueError("Checkpoint activations cannot be set for inference") if args.vocab_file is None: raise ValueError("Vocab file is required for inference") # Prepare inputs if max_length is None and max_new_tokens is None: raise ValueError("`max_length` or `max_new_tokens` are required for inference") if temperature is None: temperature = 1.0 elif not (0.0 < temperature <= 100.0): raise ValueError("temperature must be a positive number less than or equal to 100.0") if top_k is None: top_k = 0 elif not (0 <= top_k <= 1000): raise ValueError("top_k must be a positive number less than or equal to 1000") if top_p is None: top_p = 0.0 elif top_p > 0.0 and top_k > 0.0: raise ValueError("top_p and top_k sampling cannot be set together") else: if not (0.0 <= top_p <= 1.0): raise ValueError("top_p must be less than or equal to 1.0") top_p_decay = kwargs.get("top_p_decay", 0.0) if not (0.0 <= top_p_decay <= 1.0): raise ValueError("top_p_decay must be less than or equal to 1.0") top_p_bound = kwargs.get("top_p_bound", 0.0) if not (0.0 <= top_p_bound <= 1.0): raise ValueError("top_p_bound must be less than or equal to 1.0") add_BOS = kwargs.get("add_BOS", False) if not (isinstance(add_BOS, bool)): raise ValueError("add_BOS must be a boolean") beam_width = num_beams if beam_width is not None: if not isinstance(beam_width, int): raise ValueError("beam_width must be an integer") if beam_width < 1: raise ValueError("beam_width must be greater than 0") if inputs.shape[0] > 1: return "When doing beam_search, batch size must be 1" tokenizer = get_tokenizer() stop_token = kwargs.get("stop_token", tokenizer.eod) if stop_token is not None: if not isinstance(stop_token, int): raise ValueError("stop_token must be an integer") if length_penalty is None: length_penalty = 1.0 sizes_list = None prompts_tokens_tensor = None prompts_length_tensor = None if torch.distributed.get_rank() == 0: # Get the prompts length. 
if attention_mask is None: prompts_length_tensor = torch.cuda.LongTensor([inputs.shape[1]] * inputs.shape[0]) else: prompts_length_tensor = attention_mask.sum(axis=-1).cuda() if max_new_tokens is None: max_new_tokens = max_length - inputs.shape[1] if max_new_tokens <= 0: raise ValueError("max_new_tokens must be greater than 0") if add_BOS: max_length = max_new_tokens + inputs.shape[1] + 1 # making sure that `max_length` is a multiple of 4 to leverage fused kernels max_length = 4 * math.ceil(max_length / 4) max_new_tokens = max_length - (inputs.shape[1] + 1) padding = torch.cuda.LongTensor([[tokenizer.eod] * max_new_tokens] * inputs.shape[0]) prompts_tokens_tensor = torch.concat( [torch.unsqueeze(padding[:, 0], axis=-1), inputs.cuda(), padding], axis=-1 ) else: # making sure that `max_length` is a multiple of 4 to leverage fused kernels max_length = max_new_tokens + inputs.shape[1] max_length = 4 * math.ceil(max_length / 4) max_new_tokens = max_length - inputs.shape[1] padding = torch.cuda.LongTensor([[tokenizer.eod] * max_new_tokens] * inputs.shape[0]) prompts_tokens_tensor = torch.concat([inputs.cuda(), padding], axis=-1) # We need the sizes of these tensors for the boradcast sizes_list = [ prompts_tokens_tensor.size(0), # Batch size prompts_tokens_tensor.size(1), ] # Sequence lenght # First, broadcast the sizes. sizes_tensor = broadcast_int_list(2, int_list=sizes_list, rank=0) # Now that we have the sizes, we can boradcast the tokens # and length tensors. sizes = sizes_tensor.tolist() context_tokens_tensor = broadcast_tensor(sizes, torch.int64, tensor=prompts_tokens_tensor, rank=0) context_length_tensor = broadcast_tensor(sizes[0], torch.int64, tensor=prompts_length_tensor, rank=0) # Run the inference random_seed = kwargs.get("random_seed", 0) torch.random.manual_seed(random_seed) unwrapped_model = unwrap_model(self.base_model, (torchDDP, LocalDDP, Float16Module)) if beam_width is not None: tokens, _ = beam_search_and_return_on_first_stage( unwrapped_model, context_tokens_tensor, context_length_tensor, beam_width, stop_token=stop_token, num_return_gen=1, length_penalty=length_penalty, ) else: tokens, _, _ = generate_tokens_probs_and_return_on_first_stage( unwrapped_model, context_tokens_tensor, context_length_tensor, return_output_log_probs=False, top_k=top_k, top_p=top_p, top_p_decay=top_p_decay, top_p_bound=top_p_bound, temperature=temperature, use_eod_token_for_early_termination=True, ) return tokens # other utilities def avg_losses_across_data_parallel_group(losses): """ Average losses across data parallel group. Args: losses (List[Tensor]): List of losses to average across data parallel group. """ return average_losses_across_data_parallel_group(losses) def gather_across_data_parallel_groups(tensor): """ Recursively gather tensor in a nested list/tuple/dictionary of tensors from data parallel ranks. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to gather across data parallel ranks. """ def _gpu_gather_one(tensor): if tensor.ndim == 0: tensor = tensor.clone()[None] output_tensors = [ torch.empty_like(tensor) for _ in range(torch.distributed.get_world_size(group=mpu.get_data_parallel_group())) ] torch.distributed.all_gather(output_tensors, tensor, group=mpu.get_data_parallel_group()) return torch.cat(output_tensors, dim=0) return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)
accelerate/src/accelerate/utils/megatron_lm.py/0
{ "file_path": "accelerate/src/accelerate/utils/megatron_lm.py", "repo_id": "accelerate", "token_count": 26898 }
13
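The `get_batch_transformer` helper in the `megatron_lm.py` file above builds T5-style decoder inputs by shifting the labels one position to the right whenever the batch has no explicit `decoder_input_ids`. A minimal standalone sketch of that shift in plain PyTorch (the `labels` tensor below is a made-up example, not data from the file):

```python
import torch

# Hypothetical labels for two sequences; -100 marks positions the loss ignores.
labels = torch.tensor([[245, 981, 12, -100], [77, 3, -100, -100]])

# Loss mask, as in get_batch_transformer: 1.0 wherever a real label exists.
loss_mask = (labels != -100).to(torch.float)

# Decoder inputs are the labels shifted right by one position.
decoder_input_ids = labels.new_zeros(labels.shape)
decoder_input_ids[..., 1:] = labels[..., :-1].clone()
decoder_input_ids[..., 0] = 0  # decoder start / pad token id
decoder_input_ids.masked_fill_(decoder_input_ids == -100, 0)  # the ignore index is not a real token

print(decoder_input_ids)
# tensor([[  0, 245, 981,  12],
#         [  0,  77,   3,   0]])
```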
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import json import os import pickle import tempfile import time from unittest.mock import patch import psutil import pytest import torch from parameterized import parameterized from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights, load_checkpoint_and_dispatch from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import ( require_bnb, require_multi_gpu, require_non_cpu, require_transformer_engine, slow, torch_device, ) from accelerate.test_utils.testing import ( AccelerateTestCase, require_cuda, require_non_torch_xla, require_torchdata_stateful_dataloader, ) from accelerate.utils import FP8RecipeKwargs, is_torchdata_stateful_dataloader_available, patch_environment from accelerate.utils.dataclasses import DataLoaderConfiguration from accelerate.utils.modeling import get_state_dict_from_offload, load_checkpoint_in_model from accelerate.utils.random import set_seed if is_torchdata_stateful_dataloader_available(): from torchdata.stateful_dataloader import StatefulDataLoader class ModelWithTiedWeights(torch.nn.Module): def __init__(self): super().__init__() self.linear1 = torch.nn.Linear(2, 4) self.linear2 = torch.nn.Linear(4, 2) self.linear2.weight = self.linear1.weight self.linear2.bias = self.linear1.bias def forward(self, x): return self.linear2(self.linear1(x)) def create_components(tied_weights=False): model = ModelWithTiedWeights() if tied_weights else torch.nn.Linear(2, 4) optimizer = torch.optim.AdamW(model.parameters(), lr=1.0) scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1) train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3]))) valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6]))) return model, optimizer, scheduler, train_dl, valid_dl class ModelForTest(torch.nn.Module): def __init__(self): super().__init__() self.linear1 = torch.nn.Linear(3, 4) self.batchnorm = torch.nn.BatchNorm1d(4) self.linear2 = torch.nn.Linear(4, 5) def forward(self, x): return self.linear2(self.batchnorm(self.linear1(x))) def create_dataloaders_for_test(batch_size=3, n_train_batches: int = 12, n_valid_batches: int = 2, num_workers=0): "Generates a tuple of dummy DataLoaders to test with" def get_dataset(n_batches): x = torch.randn(batch_size * n_batches, 3) y = torch.randn(batch_size * n_batches, 5) return TensorDataset(x, y) train_dataset = get_dataset(n_train_batches) valid_dataset = get_dataset(n_valid_batches) train_dataloader = DataLoader(train_dataset, batch_size=batch_size, num_workers=num_workers) valid_dataloader = DataLoader(valid_dataset, batch_size=batch_size, num_workers=num_workers) return (train_dataloader, valid_dataloader) def get_signature(model): return sum(param.abs().sum().item() for param in model.parameters()) def load_random_weights(model): if 
isinstance(model, torch.nn.Linear): state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict() elif isinstance(model, ModelWithTiedWeights): state = ModelWithTiedWeights().state_dict() model.load_state_dict(state) def parameterized_custom_name_func(func, param_num, param): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param param_based_name = "use_safetensors" if param.args[0] is True else "use_pytorch" if len(param.args) > 1: param_based_name += "_tied_weights" if param.args[1] is True else "" if len(param.args) > 2: param_based_name += f"_num_workers_{param.args[2]}" if len(param.args) > 3: param_based_name += "_dispatch_batches" if param.args[3] is True else "_no_dispatch_batches" return f"{func.__name__}_{param_based_name}" class AcceleratorTester(AccelerateTestCase): # Should be removed after 1.0.0 release def test_deprecated_values(self): # Test defaults accelerator = Accelerator() assert accelerator.split_batches is False, "split_batches should be False by default" assert accelerator.dispatch_batches is None, "dispatch_batches should be None by default" assert accelerator.even_batches is True, "even_batches should be True by default" assert accelerator.use_seedable_sampler is False, "use_seedable_sampler should be False by default" # Pass some arguments only with pytest.warns(FutureWarning) as cm: accelerator = Accelerator( dispatch_batches=True, split_batches=False, ) deprecation_warning = str(cm.list[0].message) assert accelerator.split_batches is False, "split_batches should be True" assert accelerator.dispatch_batches is True, "dispatch_batches should be True" assert accelerator.even_batches is True, "even_batches should be True by default" assert accelerator.use_seedable_sampler is False, "use_seedable_sampler should be False by default" assert "dispatch_batches" in deprecation_warning assert "split_batches" in deprecation_warning assert "even_batches" not in deprecation_warning assert "use_seedable_sampler" not in deprecation_warning # Pass in some arguments, but with their defaults with pytest.warns(FutureWarning) as cm: accelerator = Accelerator( even_batches=True, use_seedable_sampler=False, ) deprecation_warning = str(cm.list[0].message) assert "even_batches" in deprecation_warning assert accelerator.even_batches is True assert "use_seedable_sampler" in deprecation_warning assert accelerator.use_seedable_sampler is False def test_partial_state_after_reset(self): # Verifies that custom getattr errors will be thrown # if the state is reset, but only if trying to # get expected attributes state = PartialState() assert state.num_processes > 0 with self.assertRaises(AttributeError) as cm: state.someotherthing assert "'PartialState' object has no attribute" in str(cm.exception) assert "This happens if `PartialState._reset_state()`" not in str(cm.exception) with self.assertRaises(AttributeError) as cm: state._reset_state() state.num_processes assert "`PartialState` object has no attribute" in str(cm.exception) assert "This happens if `PartialState._reset_state()`" in str(cm.exception) state.someotherthing = "MyValue" assert state.someotherthing == "MyValue" def test_accelerator_state_after_reset(self): # Verifies that custom getattr errors will be thrown # if the state is reset, but only if trying to # get expected attributes accelerator = Accelerator() assert accelerator.num_processes > 0 with self.assertRaises(AttributeError) as cm: accelerator.state.someotherthing assert 
"'AcceleratorState' object has no attribute" in str(cm.exception) assert "This happens if `AcceleratorState._reset_state()`" not in str(cm.exception) with self.assertRaises(AttributeError) as cm: accelerator.state._reset_state() accelerator.num_processes assert "`AcceleratorState` object has no attribute" in str(cm.exception) assert "This happens if `AcceleratorState._reset_state()`" in str(cm.exception) accelerator.state.someotherthing = "MyValue" assert accelerator.state.someotherthing == "MyValue" @require_non_cpu def test_accelerator_can_be_reinstantiated(self): _ = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type in ["cuda", "mps", "npu", "xpu", "xla"] with self.assertRaises(ValueError): _ = Accelerator(cpu=True) @require_cuda def test_setting_cpu_affinity(self): with patch_environment(accelerate_cpu_affinity=1, accelerate_debug_mode=1): with self.assertLogs("accelerate.utils.environment", level="INFO") as cm: _ = Accelerator() assert any("Assigning" in log for log in cm.output) assert any("cpu cores to process" in log for log in cm.output) def test_mutable_states(self): accelerator = Accelerator() state = GradientState() assert state.num_steps == 1 accelerator.gradient_accumulation_steps = 4 assert state.num_steps == 4 assert state.sync_gradients is True accelerator.sync_gradients = False assert state.sync_gradients is False GradientState._reset_state() def test_prepared_objects_are_referenced(self): accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components() ( prepared_model, prepared_optimizer, prepared_scheduler, prepared_train_dl, prepared_valid_dl, ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl) assert prepared_model in accelerator._models assert prepared_optimizer in accelerator._optimizers assert prepared_scheduler in accelerator._schedulers assert prepared_train_dl in accelerator._dataloaders assert prepared_valid_dl in accelerator._dataloaders def test_free_memory_dereferences_prepared_components(self): accelerator = Accelerator() # Free up refs with empty_cache() and gc.collect() accelerator.free_memory() model, optimizer, scheduler, train_dl, valid_dl = create_components() free_cpu_ram_before = psutil.virtual_memory().available // 1024 // 1024 model, optimizer, scheduler, train_dl, valid_dl = accelerator.prepare( model, optimizer, scheduler, train_dl, valid_dl ) # Short sleep here makes this test more reliable time.sleep(1e-3) model, optimizer, scheduler, train_dl, valid_dl = accelerator.free_memory( model, optimizer, scheduler, train_dl, valid_dl ) free_cpu_ram_after = psutil.virtual_memory().available // 1024 // 1024 assert len(accelerator._models) == 0 assert len(accelerator._optimizers) == 0 assert len(accelerator._schedulers) == 0 assert len(accelerator._dataloaders) == 0 # The less-than comes *specifically* from CUDA CPU things/won't be present on CPU builds assert free_cpu_ram_after <= free_cpu_ram_before @require_non_torch_xla def test_env_var_device(self): """Tests that setting the torch device with ACCELERATE_TORCH_DEVICE overrides default device.""" PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*args, **kwargs): pass with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"): accelerator = Accelerator() assert str(accelerator.state.device) == "cuda:64" @parameterized.expand([(True, True), (True, False), (False, False)], 
name_func=parameterized_custom_name_func) def test_save_load_model(self, use_safetensors, tied_weights): accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components(tied_weights) accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl) model_signature = get_signature(model) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(tmpdirname, safe_serialization=use_safetensors) # make sure random weights don't match load_random_weights(model) assert abs(model_signature - get_signature(model)) > 1e-3 # make sure loaded weights match accelerator.load_state(tmpdirname) assert abs(model_signature - get_signature(model)) < 1e-3 @parameterized.expand([True, False], name_func=parameterized_custom_name_func) def test_save_model(self, use_safetensors): accelerator = Accelerator() model = torch.nn.Linear(10, 10) model_signature = get_signature(model) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_model(model, tmpdirname, safe_serialization=use_safetensors) # make sure loaded weights match load_checkpoint_in_model(model, tmpdirname) assert abs(model_signature - get_signature(model)) < 1e-3 @parameterized.expand([True, False], name_func=parameterized_custom_name_func) def test_save_sharded_model(self, use_safetensors): accelerator = Accelerator() inputs = torch.randn(3, 3) model = ModelForTest() expected = model(inputs) with tempfile.TemporaryDirectory() as tmpdirname: # By setting it to 100, we will split the model int 3 shards accelerator.save_model(model, tmpdirname, safe_serialization=use_safetensors, max_shard_size=100) # make sure loaded weights match load_checkpoint_in_model(model, tmpdirname) output = model(inputs) assert torch.allclose(expected, output, atol=1e-5) @parameterized.expand([True, False], name_func=parameterized_custom_name_func) def test_save_model_offload(self, use_safetensors): accelerator = Accelerator() device_map = {"linear1": "cpu", "batchnorm": "disk", "linear2": "cpu"} inputs = torch.randn(3, 3) model = ModelForTest() expected = model(inputs) with tempfile.TemporaryDirectory() as tmp_dir: accelerator.save_model(model, tmp_dir, safe_serialization=use_safetensors) # load and save offloaded model load_checkpoint_and_dispatch(model, tmp_dir, device_map=device_map, offload_folder=tmp_dir) accelerator.save_model(model, tmp_dir, safe_serialization=use_safetensors) # load weights that were saved from the offloaded model load_checkpoint_and_dispatch(model, tmp_dir) output = model(inputs) assert torch.allclose(expected, output, atol=1e-5) @parameterized.expand([True, False], name_func=parameterized_custom_name_func) @require_non_cpu def test_get_state_dict_from_offload(self, use_safetensors): accelerator = Accelerator() device_map = {"linear1": "cpu", "batchnorm": "disk", "linear2": "disk"} model = ModelForTest() offloaded_layer_weight = model.linear2.weight with tempfile.TemporaryDirectory() as tmp_dir: accelerator.save_model(model, tmp_dir, safe_serialization=use_safetensors) # load model with offloaded layers load_checkpoint_and_dispatch(model, tmp_dir, device_map=device_map, offload_folder=tmp_dir) cpu_onloaded_layer = get_state_dict_from_offload( model.linear2, "linear2.weight", {"linear2.weight": ""}, device_to_put_offload="cpu" ) device_onloaded_layer = get_state_dict_from_offload( model.linear2, "linear2.weight", {"linear2.weight": ""}, device_to_put_offload=0 ) cpu_onloaded_layer_weight = cpu_onloaded_layer["linear2.weight"] device_onloaded_layer_weight = 
device_onloaded_layer["linear2.weight"] assert torch.allclose(offloaded_layer_weight, cpu_onloaded_layer_weight) assert torch.allclose( offloaded_layer_weight, device_onloaded_layer_weight.to("cpu") ) # must be on the same device for torch.allclose() assert cpu_onloaded_layer_weight.device.type == "cpu" assert device_onloaded_layer_weight.device.type == torch_device @parameterized.expand([True, False], name_func=parameterized_custom_name_func) def test_save_load_model_with_hooks(self, use_safetensors): accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components() accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl) model_signature = get_signature(model) # saving hook def save_config(models, weights, output_dir): config = {"class_name": models[0].__class__.__name__} with open(os.path.join(output_dir, "data.json"), "w") as f: json.dump(config, f) # loading hook def load_config(models, input_dir): with open(os.path.join(input_dir, "data.json")) as f: config = json.load(f) models[0].class_name = config["class_name"] save_hook = accelerator.register_save_state_pre_hook(save_config) load_hook = accelerator.register_load_state_pre_hook(load_config) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(tmpdirname, safe_serialization=use_safetensors) # make sure random weights don't match with hooks load_random_weights(model) assert abs(model_signature - get_signature(model)) > 1e-3 # random class name to verify correct one is loaded model.class_name = "random" # make sure loaded weights match with hooks accelerator.load_state(tmpdirname) assert abs(model_signature - get_signature(model)) < 1e-3 # mode.class_name is loaded from config assert model.class_name == model.__class__.__name__ # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(tmpdirname, safe_serialization=use_safetensors) # make sure random weights don't match with hooks removed load_random_weights(model) assert abs(model_signature - get_signature(model)) > 1e-3 # random class name to verify correct one is loaded model.class_name = "random" # make sure loaded weights match with hooks removed accelerator.load_state(tmpdirname) assert abs(model_signature - get_signature(model)) < 1e-3 # mode.class_name is NOT loaded from config assert model.class_name != model.__class__.__name__ def test_accelerator_none(self): """Just test that passing None to accelerator.prepare() works.""" accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components() dummy_obj = None # This should work model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare( model, optimizer, scheduler, train_dl, valid_dl, dummy_obj ) assert dummy_obj is None def test_is_accelerator_prepared(self): """Checks that `_is_accelerator_prepared` is set properly""" accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components() dummy_obj = [1, 2, 3] # This should work model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare( model, optimizer, scheduler, train_dl, valid_dl, dummy_obj ) assert ( getattr(dummy_obj, "_is_accelerate_prepared", False) is False ), "Dummy object should have `_is_accelerate_prepared` set to `True`" assert ( getattr(model, "_is_accelerate_prepared", False) is True ), "Model is missing `_is_accelerator_prepared` or is set to `False`" assert ( getattr(optimizer, "_is_accelerate_prepared", False) is True ), 
"Optimizer is missing `_is_accelerator_prepared` or is set to `False`" assert ( getattr(scheduler, "_is_accelerate_prepared", False) is True ), "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" assert ( getattr(train_dl, "_is_accelerate_prepared", False) is True ), "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" assert ( getattr(valid_dl, "_is_accelerate_prepared", False) is True ), "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" @require_cuda @slow @require_bnb def test_accelerator_bnb(self): """Tests that the accelerator can be used with the BNB library.""" from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map={"": 0}, ) accelerator = Accelerator() # This should work model = accelerator.prepare(model) @require_cuda @slow @require_bnb def test_accelerator_bnb_cpu_error(self): """Tests that the accelerator can be used with the BNB library. This should fail as we are trying to load a model that is loaded between cpu and gpu""" from transformers import AutoModelForCausalLM accelerator = Accelerator() with init_empty_weights(): model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", ) model.tie_weights() device_map = infer_auto_device_map(model) device_map["lm_head"] = "cpu" model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True ) # This should not work and get value error with self.assertRaises(ValueError): model = accelerator.prepare(model) @require_non_torch_xla @slow @require_bnb @require_multi_gpu def test_accelerator_bnb_multi_device(self): """Tests that the accelerator can be used with the BNB library.""" from transformers import AutoModelForCausalLM if torch_device == "cuda": PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU} elif torch_device == "npu": PartialState._shared_state = {"distributed_type": DistributedType.MULTI_NPU} else: raise ValueError(f"{torch_device} is not supported in test_accelerator_bnb_multi_device.") with init_empty_weights(): model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", ) model.tie_weights() device_map = infer_auto_device_map(model) device_map["lm_head"] = 1 model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, ) accelerator = Accelerator() # This should not work and get value error with self.assertRaises(ValueError): _ = accelerator.prepare(model) PartialState._reset_state() @require_non_torch_xla @slow @require_bnb @require_multi_gpu def test_accelerator_bnb_multi_device_no_distributed(self): """Tests that the accelerator can be used with the BNB library.""" from transformers import AutoModelForCausalLM with init_empty_weights(): model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", ) device_map = infer_auto_device_map(model) device_map["lm_head"] = 1 model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, ) accelerator = Accelerator() # This should work _ = accelerator.prepare(model) @require_non_cpu def test_accelerator_cpu_flag_prepare(self): model = torch.nn.Linear(10, 10) sgd = torch.optim.SGD(model.parameters(), lr=0.01) accelerator = Accelerator(cpu=True) _ = accelerator.prepare(sgd) @require_transformer_engine def test_can_unwrap_model_te(self): model, optimizer, *_ = 
create_components() fp8_recipe = FP8RecipeKwargs(backend="TE") accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[fp8_recipe]) inputs = torch.randn(10, 2).to(torch_device) model, optimizer = accelerator.prepare(model, optimizer) model(inputs) # sanity check that this works model = accelerator.unwrap_model(model, keep_fp32_wrapper=False) model(inputs) # check that this still works # check that pickle roundtrip works model_loaded = pickle.loads(pickle.dumps(model)) model_loaded(inputs) @require_non_cpu def test_can_unwrap_model_fp16(self): # test for a regression introduced in #872 # before the fix, after unwrapping with keep_fp32_wrapper=False, there would be the following error: # Linear.forward() missing 1 required positional argument: 'input' model = create_components()[0] accelerator = Accelerator(mixed_precision="fp16") inputs = torch.randn(10, 2).to(torch_device) model = accelerator.prepare(model) model(inputs) # sanity check that this works model = accelerator.unwrap_model(model, keep_fp32_wrapper=False) model(inputs) # check that this still works # check that pickle roundtrip works model_loaded = pickle.loads(pickle.dumps(model)) model_loaded(inputs) def test_can_unwrap_model(self): model = create_components()[0] accelerator = Accelerator(mixed_precision="no", cpu=True) inputs = torch.randn(10, 2) model = accelerator.prepare(model) model(inputs) # sanity check that this works model = accelerator.unwrap_model(model, keep_fp32_wrapper=False) model(inputs) # check that this still works # check that pickle roundtrip works model_loaded = pickle.loads(pickle.dumps(model)) model_loaded(inputs) # Ideally would be a parameterized test which works with either stateful or non-stateful dataloaders, but dependencies are a bit awkward. @require_torchdata_stateful_dataloader def test_prepared_objects_are_referenced_with_stateful_dataloader(self): """Test that setting `use_stateful_dataloader=True` in `DataLoaderConfiguration` prepares a `StatefulDataLoader` object instead of a `DataLoader` object.""" dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=True) accelerator = Accelerator(dataloader_config=dataloader_config) model, optimizer, scheduler, train_dl, valid_dl = create_components() ( prepared_model, prepared_optimizer, prepared_scheduler, prepared_train_dl, prepared_valid_dl, ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl) assert prepared_model in accelerator._models assert prepared_optimizer in accelerator._optimizers assert prepared_scheduler in accelerator._schedulers assert prepared_train_dl in accelerator._dataloaders assert prepared_valid_dl in accelerator._dataloaders assert isinstance(prepared_train_dl, StatefulDataLoader) assert isinstance(prepared_valid_dl, StatefulDataLoader) @parameterized.expand( itertools.product([True, False], [True, False], [0, 2], [True, False]), name_func=parameterized_custom_name_func, ) @require_torchdata_stateful_dataloader def test_save_model_with_stateful_dataloader(self, use_safetensors, tied_weights, num_workers, dispatch_batches): """ Test that saving and loading a model with a stateful dataloader returns the same model, and that the dataloader's iterator is restored properly.""" set_seed(42) n_train_batches = 64 # Use enough batches to ensure we can get partial iterations on large compute dataloader_config = DataLoaderConfiguration(dispatch_batches=dispatch_batches, use_stateful_dataloader=True) accelerator = Accelerator(dataloader_config=dataloader_config) model, optimizer, scheduler, 
train_dl, valid_dl = create_components(tied_weights) train_dl, valid_dl = create_dataloaders_for_test(n_train_batches=n_train_batches, num_workers=num_workers) model = ModelForTest() ( prepared_model, prepared_optimizer, prepared_scheduler, prepared_train_dl, prepared_valid_dl, ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl) assert isinstance(prepared_train_dl, StatefulDataLoader) assert isinstance(prepared_valid_dl, StatefulDataLoader) # Perform 3 training iterations to ensure the dataloader's iterator is advanced num_batches_to_skip = 3 model.train() untrained_batches = [] with tempfile.TemporaryDirectory() as tmpdirname: for step, batch in enumerate(prepared_train_dl): x, y = batch outputs = prepared_model(x) loss = torch.nn.functional.mse_loss(outputs, y) accelerator.backward(loss) prepared_optimizer.step() prepared_scheduler.step() prepared_optimizer.zero_grad() if step == num_batches_to_skip - 1: # Save the state once we've gone through a few batches accelerator.save_state(f"{tmpdirname}/state", safe_serialization=use_safetensors) if step >= num_batches_to_skip: untrained_batches.append(batch) not_skipped_batches = accelerator.gather(untrained_batches) # We then unwrap the trained model unwrapped_model = accelerator.unwrap_model(prepared_model) original_linear1 = unwrapped_model.linear1.weight.clone() original_batchnorm = unwrapped_model.batchnorm.weight.clone() original_linear2 = unwrapped_model.linear2.weight.clone() # Resume the state accelerator.load_state(f"{tmpdirname}/state") # Train this to the end of the DataLoader batches_seen_with_loaded_dl = 0 for batch in prepared_train_dl: x, y = batch outputs = prepared_model(x) loss = torch.nn.functional.mse_loss(outputs, y) accelerator.backward(loss) prepared_optimizer.step() prepared_scheduler.step() prepared_optimizer.zero_grad() batches_seen_with_loaded_dl += 1 unwrapped_model_2 = accelerator.unwrap_model(prepared_model) new_linear1 = unwrapped_model_2.linear1.weight new_batchnorm = unwrapped_model_2.batchnorm.weight new_linear2 = unwrapped_model_2.linear2.weight # Assert equalities assert batches_seen_with_loaded_dl == len(not_skipped_batches) assert torch.allclose(original_linear1, new_linear1) assert torch.allclose(original_batchnorm, new_batchnorm) assert torch.allclose(original_linear2, new_linear2)
accelerate/tests/test_accelerator.py/0
{ "file_path": "accelerate/tests/test_accelerator.py", "repo_id": "accelerate", "token_count": 13399 }
14
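Most of the checkpointing tests in `test_accelerator.py` above revolve around the same `Accelerator.save_state` / `load_state` round-trip. A minimal sketch of that pattern outside the test harness (the tiny model, optimizer, and temporary directory are placeholders):

```python
import tempfile

import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

with tempfile.TemporaryDirectory() as ckpt_dir:
    # Persist model weights, optimizer state, and RNG states.
    accelerator.save_state(ckpt_dir)

    # ... training continues and the weights drift ...

    # Restore everything captured by save_state.
    accelerator.load_state(ckpt_dir)
```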
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import subprocess import sys from accelerate.test_utils import require_transformer_engine from accelerate.test_utils.testing import TempDirTestCase, require_import_timer from accelerate.utils import is_import_timer_available if is_import_timer_available(): from import_timer import calculate_total_time, read_import_profile from import_timer.core import get_paths_above_threshold, sort_nodes_by_total_time def convert_list_to_string(data): end_result = "" arrow_right = "->" for path in data: end_result += f"{arrow_right.join(path[0])} {path[1]:.3f}s\n" return end_result def run_import_time(command: str): output = subprocess.run([sys.executable, "-X", "importtime", "-c", command], capture_output=True, text=True) return output.stderr @require_import_timer class ImportSpeedTester(TempDirTestCase): """ Test suite which checks if imports have seen slowdowns based on a particular baseline. If the error messages are not clear enough to get a full view of what is slowing things down (or to figure out how deep the initial depth should be), please view the profile with the `tuna` framework: `tuna import.log`. """ clear_on_setup = False @classmethod def setUpClass(cls): super().setUpClass() output = run_import_time("import torch") data = read_import_profile(output) total_time = calculate_total_time(data) cls.pytorch_time = total_time def test_base_import(self): output = run_import_time("import accelerate") data = read_import_profile(output) total_time = calculate_total_time(data) pct_more = (total_time - self.pytorch_time) / self.pytorch_time * 100 # Base import should never be more than 20% slower than raw torch import err_msg = f"Base import is more than 20% slower than raw torch import ({pct_more:.2f}%), please check the attached `tuna` profile:\n" sorted_data = sort_nodes_by_total_time(data) paths_above_threshold = get_paths_above_threshold(sorted_data, 0.05, max_depth=7) err_msg += f"\n{convert_list_to_string(paths_above_threshold)}" self.assertLess(pct_more, 20, err_msg) def test_cli_import(self): output = run_import_time("from accelerate.commands.launch import launch_command_parser") data = read_import_profile(output) total_time = calculate_total_time(data) pct_more = (total_time - self.pytorch_time) / self.pytorch_time * 100 # Base import should never be more than 20% slower than raw torch import err_msg = f"Base import is more than 20% slower than raw torch import ({pct_more:.2f}%), please check the attached `tuna` profile:\n" sorted_data = sort_nodes_by_total_time(data) paths_above_threshold = get_paths_above_threshold(sorted_data, 0.05, max_depth=7) err_msg += f"\n{convert_list_to_string(paths_above_threshold)}" self.assertLess(pct_more, 20, err_msg) @require_transformer_engine class LazyImportTester(TempDirTestCase): """ Test suite which checks if specific packages are lazy-loaded. Eager-import will trigger circular import in some case, e.g. in huggingface/accelerate#3056. 
""" def test_te_import(self): output = run_import_time("import accelerate, accelerate.utils.transformer_engine") self.assertFalse(" transformer_engine" in output, "`transformer_engine` should not be imported on import")
accelerate/tests/test_imports.py/0
{ "file_path": "accelerate/tests/test_imports.py", "repo_id": "accelerate", "token_count": 1442 }
15
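The import-speed tests above lean on CPython's `-X importtime` flag and parse its profile with the external `import_timer` package. As a rough illustration of what that raw stderr profile contains, a hand-rolled parser might resemble the sketch below (the `import json` target is only an example):

```python
import subprocess
import sys

# Run an import under `-X importtime`; the profile is written to stderr.
proc = subprocess.run(
    [sys.executable, "-X", "importtime", "-c", "import json"],
    capture_output=True,
    text=True,
)

total_self_us = 0
for line in proc.stderr.splitlines():
    # Profile lines look like: "import time:  <self us> | <cumulative us> | <module>"
    if not line.startswith("import time:") or "cumulative" in line:
        continue  # skip anything else, including the header line
    self_us, _cumulative_us, _module = line[len("import time:"):].split("|")
    total_self_us += int(self_us.strip())

print(f"total import time: {total_self_us / 1e6:.3f}s")
```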
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: MULTI_GPU
downcast_bf16: 'no'
gpu_ids: all
machine_rank: 0
main_training_function: main
mixed_precision: bf16
num_machines: 1
num_processes: 8
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
alignment-handbook/recipes/accelerate_configs/multi_gpu.yaml/0
{ "file_path": "alignment-handbook/recipes/accelerate_configs/multi_gpu.yaml", "repo_id": "alignment-handbook", "token_count": 129 }
16
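The `multi_gpu.yaml` recipe above asks `accelerate launch` for eight bf16 processes on a single machine. An optional sanity check before launching is to compare `num_processes` against the GPUs actually visible; the snippet below is a sketch that assumes a repository checkout with that relative path and the `pyyaml` package installed:

```python
import torch
import yaml  # pyyaml

# Path is an assumption: the recipe file inside an alignment-handbook checkout.
with open("recipes/accelerate_configs/multi_gpu.yaml") as f:
    cfg = yaml.safe_load(f)

visible_gpus = torch.cuda.device_count()
if cfg["num_processes"] > visible_gpus:
    print(
        f"Config requests {cfg['num_processes']} processes but only {visible_gpus} GPUs are visible; "
        f"consider overriding with `accelerate launch --num_processes {visible_gpus} ...`."
    )
```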
# Instructions to train StarChat2

Similar to how we trained Zephyr 7B Beta in our [technical report](https://huggingface.co/papers/2310.16944), training this model proceeds in two steps:

1. Apply SFT to fine-tune [StarCoder2 15B](https://huggingface.co/bigcode/starcoder2-15b) on a blend of chat, code, and math datasets. The result is an SFT model like [`starchat2-15b-sft-v0.1`](https://huggingface.co/HuggingFaceH4/starchat2-15b-sft-v0.1).
2. Align the SFT model to AI feedback via DPO on the UltraFeedback and Orca DPO Pairs datasets. The result is a DPO model like [`starchat2-15b-v0.1`](https://huggingface.co/HuggingFaceH4/starchat2-15b-v0.1).

See below for commands to train these models using DeepSpeed ZeRO-3.

## Full training examples

You will require 8 GPUs (80GB of VRAM) to train the full model - alternatively, you can train on 1 GPU by adjusting `per_device_train_batch_size` and `gradient_accumulation_steps` to keep the global batch size constant. A recipe involving QLoRA will come later 🤗.

```shell
# Step 1 - SFT
ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml scripts/run_sft.py recipes/starchat2-15b/sft/config_v0.1.yaml

# Step 2 - DPO
ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/deepspeed_zero3.yaml scripts/run_dpo.py recipes/starchat2-15b/dpo/config_v0.1.yaml
```
alignment-handbook/recipes/starchat2-15b/README.md/0
{ "file_path": "alignment-handbook/recipes/starchat2-15b/README.md", "repo_id": "alignment-handbook", "token_count": 495 }
17
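The note in the StarChat2 README about keeping the global batch size constant is plain arithmetic: global batch = number of GPUs × `per_device_train_batch_size` × `gradient_accumulation_steps`. A quick check with illustrative numbers (not the recipe's actual values):

```python
def global_batch_size(num_gpus: int, per_device_batch: int, grad_accum_steps: int) -> int:
    # Effective number of samples contributing to each optimizer step.
    return num_gpus * per_device_batch * grad_accum_steps

# 8 GPUs with per-device batch 4 and no accumulation...
assert global_batch_size(8, 4, 1) == 32
# ...matches a single GPU with per-device batch 4 and 8 accumulation steps.
assert global_batch_size(1, 4, 8) == 32
```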
#!/usr/bin/env python # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import random import sys from typing import Any, Dict import torch import transformers from transformers import AutoModelForCausalLM, set_seed from alignment import ( DataArguments, H4ArgumentParser, ModelArguments, apply_chat_template, decontaminate_humaneval, get_checkpoint, get_datasets, get_kbit_device_map, get_peft_config, get_quantization_config, get_tokenizer, ) from trl import ORPOConfig, ORPOTrainer, setup_chat_format logger = logging.getLogger(__name__) def main(): parser = H4ArgumentParser((ModelArguments, DataArguments, ORPOConfig)) model_args, data_args, training_args = parser.parse() ####### # Setup ####### logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.info(f"Model parameters {model_args}") logger.info(f"Data parameters {data_args}") logger.info(f"Training/evaluation parameters {training_args}") # Check for last checkpoint last_checkpoint = get_checkpoint(training_args) if last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info(f"Checkpoint detected, resuming training at {last_checkpoint=}.") # Set seed for reproducibility set_seed(training_args.seed) ############### # Load datasets ############### raw_datasets = get_datasets( data_args, splits=data_args.dataset_splits, configs=data_args.dataset_configs, columns_to_keep=[ "prompt", "chosen", "rejected", ], ) logger.info( f"Training on the following splits: {[split + ' : ' + str(dset.num_rows) for split, dset in raw_datasets.items()]}" ) column_names = list(raw_datasets["train"].features) ##################################### # Load tokenizer and process datasets ##################################### data_args.truncation_side = "left" # Truncate from left to ensure we don't lose labels in final turn tokenizer = get_tokenizer(model_args, data_args) torch_dtype = ( model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype) ) quantization_config = get_quantization_config(model_args) model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, revision=model_args.model_revision, trust_remote_code=model_args.trust_remote_code, attn_implementation=model_args.attn_implementation, torch_dtype=torch_dtype, use_cache=False if training_args.gradient_checkpointing else True, device_map=get_kbit_device_map() if quantization_config is not None else None, quantization_config=quantization_config, ) # For ChatML we need to add special tokens and resize the embedding layer if "<|im_start|>" in 
tokenizer.chat_template: model, tokenizer = setup_chat_format(model, tokenizer) ##################### # Apply chat template ##################### raw_datasets = raw_datasets.map( apply_chat_template, fn_kwargs={ "tokenizer": tokenizer, "task": "orpo", "auto_insert_empty_system_msg": data_args.auto_insert_empty_system_msg, }, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, desc="Formatting comparisons with prompt template", ) ############################# # Filter out seq > max_length ############################# if training_args.max_prompt_length is not None: unfiltered_train_samples = len(raw_datasets["train"]) if "test" in raw_datasets: unfiltered_test_samples = len(raw_datasets["test"]) def filter_fn(sample: Dict[str, Any]) -> Dict[str, Any]: prompt_length = tokenizer( sample["text_prompt"], return_tensors="pt", )[ "input_ids" ].size(dim=-1) return prompt_length < training_args.max_prompt_length raw_datasets = raw_datasets.filter( filter_fn, desc="Filtering out the samples where len(text_prompt) > max_prompt_length", ) filtered_train_samples = unfiltered_train_samples - len(raw_datasets["train"]) logger.info( f"Filtered out {filtered_train_samples} training samples out of the {unfiltered_train_samples} samples." ) if "test" in raw_datasets: filtered_test_samples = unfiltered_test_samples - len(raw_datasets["test"]) logger.info( f"Filtered out {filtered_test_samples} test samples out of the {unfiltered_test_samples} samples." ) ########################## # Decontaminate benchmarks ########################## num_raw_train_samples = len(raw_datasets["train"]) raw_datasets = raw_datasets.filter( decontaminate_humaneval, fn_kwargs={"text_column": "text_chosen"}, batched=True, batch_size=10_000, num_proc=1, desc="Decontaminating HumanEval samples", ) num_filtered_train_samples = num_raw_train_samples - len(raw_datasets["train"]) logger.info( f"Decontaminated {num_filtered_train_samples} ({num_filtered_train_samples/num_raw_train_samples * 100:.2f}%) samples from the training set." 
) # Replace column names with what TRL needs, text_prompt -> prompt, text_chosen -> chosen and text_rejected -> rejected for split in raw_datasets.keys(): raw_datasets[split] = raw_datasets[split].rename_columns( { "text_prompt": "prompt", "text_chosen": "chosen", "text_rejected": "rejected", } ) # Log a few random samples from the training set: for index in random.sample(range(len(raw_datasets["train"])), 3): logger.info(f"Prompt sample {index} of the raw training set:\n\n{raw_datasets['train'][index]['prompt']}") logger.info(f"Chosen sample {index} of the raw training set:\n\n{raw_datasets['train'][index]['chosen']}") logger.info(f"Rejected sample {index} of the raw training set:\n\n{raw_datasets['train'][index]['rejected']}") ########################## # Instantiate ORPO trainer ########################## trainer = ORPOTrainer( model, args=training_args, train_dataset=raw_datasets["train"], eval_dataset=raw_datasets["test"] if "test" in raw_datasets else None, tokenizer=tokenizer, peft_config=get_peft_config(model_args), # type: ignore ) ############### # Training loop ############### checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics metrics["train_samples"] = len(raw_datasets["train"]) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() logger.info("*** Training complete ***") ################################## # Save model and create model card ################################## logger.info("*** Save model ***") if trainer.is_fsdp_enabled: trainer.accelerator.state.fsdp_plugin.set_state_dict_type("FULL_STATE_DICT") trainer.save_model(training_args.output_dir) logger.info(f"Model saved to {training_args.output_dir}") # Save everything else on main process kwargs = { "finetuned_from": model_args.model_name_or_path, "dataset": list(data_args.dataset_mixer.keys()), "dataset_tags": list(data_args.dataset_mixer.keys()), "tags": ["alignment-handbook"], } if trainer.accelerator.is_main_process: trainer.create_model_card(**kwargs) # Restore k,v cache for fast inference trainer.model.config.use_cache = True trainer.model.config.save_pretrained(training_args.output_dir) ########## # Evaluate ########## if training_args.do_eval and "test" in raw_datasets: logger.info("*** Evaluate ***") metrics = trainer.evaluate() metrics["eval_samples"] = len(raw_datasets["test"]) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if training_args.push_to_hub is True: logger.info("Pushing to hub...") trainer.push_to_hub(**kwargs) logger.info("*** Training complete! ***") if __name__ == "__main__": main()
alignment-handbook/scripts/run_orpo.py/0
{ "file_path": "alignment-handbook/scripts/run_orpo.py", "repo_id": "alignment-handbook", "token_count": 3945 }
18
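The prompt-length filter in `run_orpo.py` above drops every sample whose tokenized `text_prompt` reaches `max_prompt_length` tokens. A self-contained sketch of the same idea with `datasets` and `transformers` (the `gpt2` tokenizer and the threshold of 8 are illustrative stand-ins, not the script's real settings):

```python
from datasets import Dataset
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # stand-in tokenizer
max_prompt_length = 8  # stand-in threshold

ds = Dataset.from_dict({"text_prompt": ["short prompt", "a much longer prompt " * 20]})

def short_enough(sample):
    prompt_length = tokenizer(sample["text_prompt"], return_tensors="pt")["input_ids"].size(dim=-1)
    return prompt_length < max_prompt_length

ds = ds.filter(short_enough, desc="Filtering out samples where len(text_prompt) > max_prompt_length")
print(len(ds))  # 1 -- only the short prompt survives
```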
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from alignment import ( DataArguments, ModelArguments, get_peft_config, get_quantization_config, get_tokenizer, is_adapter_model, ) from alignment.data import DEFAULT_CHAT_TEMPLATE class GetQuantizationConfigTest(unittest.TestCase): def test_4bit(self): model_args = ModelArguments(load_in_4bit=True) quantization_config = get_quantization_config(model_args) self.assertTrue(quantization_config["load_in_4bit"]) self.assertEqual(quantization_config["bnb_4bit_compute_dtype"], "float16") self.assertEqual(quantization_config["bnb_4bit_quant_type"], "nf4") self.assertFalse(quantization_config["bnb_4bit_use_double_quant"]) def test_8bit(self): model_args = ModelArguments(load_in_8bit=True) quantization_config = get_quantization_config(model_args) self.assertTrue(quantization_config["load_in_8bit"]) def test_no_quantization(self): model_args = ModelArguments() quantization_config = get_quantization_config(model_args) self.assertIsNone(quantization_config) class GetTokenizerTest(unittest.TestCase): def setUp(self) -> None: self.model_args = ModelArguments(model_name_or_path="HuggingFaceH4/zephyr-7b-alpha") def test_right_truncation_side(self): tokenizer = get_tokenizer(self.model_args, DataArguments(truncation_side="right")) self.assertEqual(tokenizer.truncation_side, "right") def test_left_truncation_side(self): tokenizer = get_tokenizer(self.model_args, DataArguments(truncation_side="left")) self.assertEqual(tokenizer.truncation_side, "left") def test_default_chat_template(self): tokenizer = get_tokenizer(self.model_args, DataArguments()) self.assertEqual(tokenizer.chat_template, DEFAULT_CHAT_TEMPLATE) def test_chatml_chat_template(self): chat_template = "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" tokenizer = get_tokenizer(self.model_args, DataArguments(chat_template=chat_template)) self.assertEqual(tokenizer.chat_template, chat_template) class GetPeftConfigTest(unittest.TestCase): def test_peft_config(self): model_args = ModelArguments(use_peft=True, lora_r=42, lora_alpha=0.66, lora_dropout=0.99) peft_config = get_peft_config(model_args) self.assertEqual(peft_config.r, 42) self.assertEqual(peft_config.lora_alpha, 0.66) self.assertEqual(peft_config.lora_dropout, 0.99) def test_no_peft_config(self): model_args = ModelArguments(use_peft=False) peft_config = get_peft_config(model_args) self.assertIsNone(peft_config) class IsAdapterModelTest(unittest.TestCase): def test_is_adapter_model_calls_listdir(self): # Assert that for an invalid repo name it gets to the point where it calls os.listdir, # which is expected to raise a FileNotFoundError self.assertRaises(FileNotFoundError, is_adapter_model, "nonexistent/model")
alignment-handbook/tests/test_model_utils.py/0
{ "file_path": "alignment-handbook/tests/test_model_utils.py", "repo_id": "alignment-handbook", "token_count": 1490 }
19
[book] authors = ["Nicolas Patry"] language = "en" multilingual = false src = "src" title = "Candle Documentation"
candle/candle-book/book.toml/0
{ "file_path": "candle/candle-book/book.toml", "repo_id": "candle", "token_count": 38 }
20
# Advanced Cuda usage
candle/candle-book/src/inference/cuda/README.md/0
{ "file_path": "candle/candle-book/src/inference/cuda/README.md", "repo_id": "candle", "token_count": 6 }
21
use crate::benchmarks::{BenchDevice, BenchDeviceHandler}; use candle_core::{DType, Device, Tensor}; use criterion::{black_box, criterion_group, Criterion, Throughput}; use std::time::Instant; fn run(a: &Tensor) { a.affine(12.34, 56.78).unwrap(); } fn run_affine_benchmark(c: &mut Criterion, device: &Device, dtype: DType, name: &str) { let b = 1; let m = 1024; let k = 1024; let tensor = Tensor::zeros((b, m, k), dtype, device).unwrap(); let flops = b * m * k * dtype.size_in_bytes(); let mut group = c.benchmark_group(device.bench_name(name)); group.throughput(Throughput::Bytes(flops as u64)); group.bench_function("iter", move |b| { b.iter_custom(|iters| { let start = Instant::now(); for _i in 0..iters { run(black_box(&tensor)); } device.sync().unwrap(); start.elapsed() }) }); group.finish(); } fn criterion_benchmark(c: &mut Criterion) { let handler = BenchDeviceHandler::new().unwrap(); for device in handler.devices { run_affine_benchmark(c, &device, DType::F32, "affine_f32"); run_affine_benchmark(c, &device, DType::F16, "affine_f16"); run_affine_benchmark(c, &device, DType::BF16, "affine_bf16"); } } criterion_group!(benches, criterion_benchmark);
candle/candle-core/benches/benchmarks/affine.rs/0
{ "file_path": "candle/candle-core/benches/benchmarks/affine.rs", "repo_id": "candle", "token_count": 590 }
22
//! Implement conversion traits for tensors use crate::{DType, Device, Error, Tensor, WithDType}; use half::{bf16, f16, slice::HalfFloatSliceExt}; use std::convert::TryFrom; impl<T: WithDType> TryFrom<&Tensor> for Vec<T> { type Error = Error; fn try_from(tensor: &Tensor) -> Result<Self, Self::Error> { tensor.to_vec1::<T>() } } impl<T: WithDType> TryFrom<&Tensor> for Vec<Vec<T>> { type Error = Error; fn try_from(tensor: &Tensor) -> Result<Self, Self::Error> { tensor.to_vec2::<T>() } } impl<T: WithDType> TryFrom<&Tensor> for Vec<Vec<Vec<T>>> { type Error = Error; fn try_from(tensor: &Tensor) -> Result<Self, Self::Error> { tensor.to_vec3::<T>() } } impl<T: WithDType> TryFrom<Tensor> for Vec<T> { type Error = Error; fn try_from(tensor: Tensor) -> Result<Self, Self::Error> { Vec::<T>::try_from(&tensor) } } impl<T: WithDType> TryFrom<Tensor> for Vec<Vec<T>> { type Error = Error; fn try_from(tensor: Tensor) -> Result<Self, Self::Error> { Vec::<Vec<T>>::try_from(&tensor) } } impl<T: WithDType> TryFrom<Tensor> for Vec<Vec<Vec<T>>> { type Error = Error; fn try_from(tensor: Tensor) -> Result<Self, Self::Error> { Vec::<Vec<Vec<T>>>::try_from(&tensor) } } impl<T: WithDType> TryFrom<&[T]> for Tensor { type Error = Error; fn try_from(v: &[T]) -> Result<Self, Self::Error> { Tensor::from_slice(v, v.len(), &Device::Cpu) } } impl<T: WithDType> TryFrom<Vec<T>> for Tensor { type Error = Error; fn try_from(v: Vec<T>) -> Result<Self, Self::Error> { let len = v.len(); Tensor::from_vec(v, len, &Device::Cpu) } } macro_rules! from_tensor { ($typ:ident) => { impl TryFrom<&Tensor> for $typ { type Error = Error; fn try_from(tensor: &Tensor) -> Result<Self, Self::Error> { tensor.to_scalar::<$typ>() } } impl TryFrom<Tensor> for $typ { type Error = Error; fn try_from(tensor: Tensor) -> Result<Self, Self::Error> { $typ::try_from(&tensor) } } impl TryFrom<$typ> for Tensor { type Error = Error; fn try_from(v: $typ) -> Result<Self, Self::Error> { Tensor::new(v, &Device::Cpu) } } }; } from_tensor!(f64); from_tensor!(f32); from_tensor!(f16); from_tensor!(bf16); from_tensor!(i64); from_tensor!(u32); from_tensor!(u8); impl Tensor { pub fn write_bytes<W: std::io::Write>(&self, f: &mut W) -> crate::Result<()> { use byteorder::{LittleEndian, WriteBytesExt}; let vs = self.flatten_all()?; match self.dtype() { DType::BF16 => { let vs = vs.to_vec1::<bf16>()?; for &v in vs.reinterpret_cast() { f.write_u16::<LittleEndian>(v)? } } DType::F16 => { let vs = vs.to_vec1::<f16>()?; for &v in vs.reinterpret_cast() { f.write_u16::<LittleEndian>(v)? } } DType::F32 => { // TODO: Avoid using a buffer when data is already on the CPU. for v in vs.to_vec1::<f32>()? { f.write_f32::<LittleEndian>(v)? } } DType::F64 => { for v in vs.to_vec1::<f64>()? { f.write_f64::<LittleEndian>(v)? } } DType::U32 => { for v in vs.to_vec1::<u32>()? { f.write_u32::<LittleEndian>(v)? } } DType::I64 => { for v in vs.to_vec1::<i64>()? { f.write_i64::<LittleEndian>(v)? } } DType::U8 => { let vs = vs.to_vec1::<u8>()?; f.write_all(&vs)?; } } Ok(()) } }
candle/candle-core/src/convert.rs/0
{ "file_path": "candle/candle-core/src/convert.rs", "repo_id": "candle", "token_count": 2242 }
23
/// Pretty printing of tensors /// This implementation should be in line with the PyTorch version. /// https://github.com/pytorch/pytorch/blob/7b419e8513a024e172eae767e24ec1b849976b13/torch/_tensor_str.py use crate::{DType, Result, Tensor, WithDType}; use half::{bf16, f16}; impl Tensor { fn fmt_dt<T: WithDType + std::fmt::Display>( &self, f: &mut std::fmt::Formatter, ) -> std::fmt::Result { let device_str = match self.device().location() { crate::DeviceLocation::Cpu => "".to_owned(), crate::DeviceLocation::Cuda { gpu_id } => { format!(", cuda:{}", gpu_id) } crate::DeviceLocation::Metal { gpu_id } => { format!(", metal:{}", gpu_id) } }; write!(f, "Tensor[")?; match self.dims() { [] => { if let Ok(v) = self.to_scalar::<T>() { write!(f, "{v}")? } } [s] if *s < 10 => { if let Ok(vs) = self.to_vec1::<T>() { for (i, v) in vs.iter().enumerate() { if i > 0 { write!(f, ", ")?; } write!(f, "{v}")?; } } } dims => { write!(f, "dims ")?; for (i, d) in dims.iter().enumerate() { if i > 0 { write!(f, ", ")?; } write!(f, "{d}")?; } } } write!(f, "; {}{}]", self.dtype().as_str(), device_str) } } impl std::fmt::Debug for Tensor { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self.dtype() { DType::U8 => self.fmt_dt::<u8>(f), DType::U32 => self.fmt_dt::<u32>(f), DType::I64 => self.fmt_dt::<i64>(f), DType::BF16 => self.fmt_dt::<bf16>(f), DType::F16 => self.fmt_dt::<f16>(f), DType::F32 => self.fmt_dt::<f32>(f), DType::F64 => self.fmt_dt::<f64>(f), } } } /// Options for Tensor pretty printing #[derive(Debug, Clone)] pub struct PrinterOptions { pub precision: usize, pub threshold: usize, pub edge_items: usize, pub line_width: usize, pub sci_mode: Option<bool>, } static PRINT_OPTS: std::sync::Mutex<PrinterOptions> = std::sync::Mutex::new(PrinterOptions::const_default()); impl PrinterOptions { // We cannot use the default trait as it's not const. 
const fn const_default() -> Self { Self { precision: 4, threshold: 1000, edge_items: 3, line_width: 80, sci_mode: None, } } } pub fn print_options() -> &'static std::sync::Mutex<PrinterOptions> { &PRINT_OPTS } pub fn set_print_options(options: PrinterOptions) { *PRINT_OPTS.lock().unwrap() = options } pub fn set_print_options_default() { *PRINT_OPTS.lock().unwrap() = PrinterOptions::const_default() } pub fn set_print_options_short() { *PRINT_OPTS.lock().unwrap() = PrinterOptions { precision: 2, threshold: 1000, edge_items: 2, line_width: 80, sci_mode: None, } } pub fn set_print_options_full() { *PRINT_OPTS.lock().unwrap() = PrinterOptions { precision: 4, threshold: usize::MAX, edge_items: 3, line_width: 80, sci_mode: None, } } pub fn set_line_width(line_width: usize) { PRINT_OPTS.lock().unwrap().line_width = line_width } pub fn set_precision(precision: usize) { PRINT_OPTS.lock().unwrap().precision = precision } pub fn set_edge_items(edge_items: usize) { PRINT_OPTS.lock().unwrap().edge_items = edge_items } pub fn set_threshold(threshold: usize) { PRINT_OPTS.lock().unwrap().threshold = threshold } pub fn set_sci_mode(sci_mode: Option<bool>) { PRINT_OPTS.lock().unwrap().sci_mode = sci_mode } struct FmtSize { current_size: usize, } impl FmtSize { fn new() -> Self { Self { current_size: 0 } } fn final_size(self) -> usize { self.current_size } } impl std::fmt::Write for FmtSize { fn write_str(&mut self, s: &str) -> std::fmt::Result { self.current_size += s.len(); Ok(()) } } trait TensorFormatter { type Elem: WithDType; fn fmt<T: std::fmt::Write>(&self, v: Self::Elem, max_w: usize, f: &mut T) -> std::fmt::Result; fn max_width(&self, to_display: &Tensor) -> usize { let mut max_width = 1; if let Ok(vs) = to_display.flatten_all().and_then(|t| t.to_vec1()) { for &v in vs.iter() { let mut fmt_size = FmtSize::new(); let _res = self.fmt(v, 1, &mut fmt_size); max_width = usize::max(max_width, fmt_size.final_size()) } } max_width } fn write_newline_indent(i: usize, f: &mut std::fmt::Formatter) -> std::fmt::Result { writeln!(f)?; for _ in 0..i { write!(f, " ")? } Ok(()) } fn fmt_tensor( &self, t: &Tensor, indent: usize, max_w: usize, summarize: bool, po: &PrinterOptions, f: &mut std::fmt::Formatter, ) -> std::fmt::Result { let dims = t.dims(); let edge_items = po.edge_items; write!(f, "[")?; match dims { [] => { if let Ok(v) = t.to_scalar::<Self::Elem>() { self.fmt(v, max_w, f)? } } [v] if summarize && *v > 2 * edge_items => { if let Ok(vs) = t .narrow(0, 0, edge_items) .and_then(|t| t.to_vec1::<Self::Elem>()) { for v in vs.into_iter() { self.fmt(v, max_w, f)?; write!(f, ", ")?; } } write!(f, "...")?; if let Ok(vs) = t .narrow(0, v - edge_items, edge_items) .and_then(|t| t.to_vec1::<Self::Elem>()) { for v in vs.into_iter() { write!(f, ", ")?; self.fmt(v, max_w, f)?; } } } [_] => { let elements_per_line = usize::max(1, po.line_width / (max_w + 2)); if let Ok(vs) = t.to_vec1::<Self::Elem>() { for (i, v) in vs.into_iter().enumerate() { if i > 0 { if i % elements_per_line == 0 { write!(f, ",")?; Self::write_newline_indent(indent, f)? } else { write!(f, ", ")?; } } self.fmt(v, max_w, f)? } } } _ => { if summarize && dims[0] > 2 * edge_items { for i in 0..edge_items { match t.get(i) { Ok(t) => self.fmt_tensor(&t, indent + 1, max_w, summarize, po, f)?, Err(e) => write!(f, "{e:?}")?, } write!(f, ",")?; Self::write_newline_indent(indent, f)? 
} write!(f, "...")?; Self::write_newline_indent(indent, f)?; for i in dims[0] - edge_items..dims[0] { match t.get(i) { Ok(t) => self.fmt_tensor(&t, indent + 1, max_w, summarize, po, f)?, Err(e) => write!(f, "{e:?}")?, } if i + 1 != dims[0] { write!(f, ",")?; Self::write_newline_indent(indent, f)? } } } else { for i in 0..dims[0] { match t.get(i) { Ok(t) => self.fmt_tensor(&t, indent + 1, max_w, summarize, po, f)?, Err(e) => write!(f, "{e:?}")?, } if i + 1 != dims[0] { write!(f, ",")?; Self::write_newline_indent(indent, f)? } } } } } write!(f, "]")?; Ok(()) } } struct FloatFormatter<S: WithDType> { int_mode: bool, sci_mode: bool, precision: usize, _phantom: std::marker::PhantomData<S>, } impl<S> FloatFormatter<S> where S: WithDType + num_traits::Float + std::fmt::Display, { fn new(t: &Tensor, po: &PrinterOptions) -> Result<Self> { let mut int_mode = true; let mut sci_mode = false; // Rather than containing all values, this should only include // values that end up being displayed according to [threshold]. let values = t .flatten_all()? .to_vec1()? .into_iter() .filter(|v: &S| v.is_finite() && !v.is_zero()) .collect::<Vec<_>>(); if !values.is_empty() { let mut nonzero_finite_min = S::max_value(); let mut nonzero_finite_max = S::min_value(); for &v in values.iter() { let v = v.abs(); if v < nonzero_finite_min { nonzero_finite_min = v } if v > nonzero_finite_max { nonzero_finite_max = v } } for &value in values.iter() { if value.ceil() != value { int_mode = false; break; } } if let Some(v1) = S::from(1000.) { if let Some(v2) = S::from(1e8) { if let Some(v3) = S::from(1e-4) { sci_mode = nonzero_finite_max / nonzero_finite_min > v1 || nonzero_finite_max > v2 || nonzero_finite_min < v3 } } } } match po.sci_mode { None => {} Some(v) => sci_mode = v, } Ok(Self { int_mode, sci_mode, precision: po.precision, _phantom: std::marker::PhantomData, }) } } impl<S> TensorFormatter for FloatFormatter<S> where S: WithDType + num_traits::Float + std::fmt::Display + std::fmt::LowerExp, { type Elem = S; fn fmt<T: std::fmt::Write>(&self, v: Self::Elem, max_w: usize, f: &mut T) -> std::fmt::Result { if self.sci_mode { write!( f, "{v:width$.prec$e}", v = v, width = max_w, prec = self.precision ) } else if self.int_mode { if v.is_finite() { write!(f, "{v:width$.0}.", v = v, width = max_w - 1) } else { write!(f, "{v:max_w$.0}") } } else { write!( f, "{v:width$.prec$}", v = v, width = max_w, prec = self.precision ) } } } struct IntFormatter<S: WithDType> { _phantom: std::marker::PhantomData<S>, } impl<S: WithDType> IntFormatter<S> { fn new() -> Self { Self { _phantom: std::marker::PhantomData, } } } impl<S> TensorFormatter for IntFormatter<S> where S: WithDType + std::fmt::Display, { type Elem = S; fn fmt<T: std::fmt::Write>(&self, v: Self::Elem, max_w: usize, f: &mut T) -> std::fmt::Result { write!(f, "{v:max_w$}") } } fn get_summarized_data(t: &Tensor, edge_items: usize) -> Result<Tensor> { let dims = t.dims(); if dims.is_empty() { Ok(t.clone()) } else if dims.len() == 1 { if dims[0] > 2 * edge_items { Tensor::cat( &[ t.narrow(0, 0, edge_items)?, t.narrow(0, dims[0] - edge_items, edge_items)?, ], 0, ) } else { Ok(t.clone()) } } else if dims[0] > 2 * edge_items { let mut vs: Vec<_> = (0..edge_items) .map(|i| get_summarized_data(&t.get(i)?, edge_items)) .collect::<Result<Vec<_>>>()?; for i in (dims[0] - edge_items)..dims[0] { vs.push(get_summarized_data(&t.get(i)?, edge_items)?) 
} Tensor::cat(&vs, 0) } else { let vs: Vec<_> = (0..dims[0]) .map(|i| get_summarized_data(&t.get(i)?, edge_items)) .collect::<Result<Vec<_>>>()?; Tensor::cat(&vs, 0) } } impl std::fmt::Display for Tensor { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let po = PRINT_OPTS.lock().unwrap(); let summarize = self.elem_count() > po.threshold; let to_display = if summarize { match get_summarized_data(self, po.edge_items) { Ok(v) => v, Err(err) => return write!(f, "{err:?}"), } } else { self.clone() }; match self.dtype() { DType::U8 => { let tf: IntFormatter<u8> = IntFormatter::new(); let max_w = tf.max_width(&to_display); tf.fmt_tensor(self, 1, max_w, summarize, &po, f)?; writeln!(f)?; } DType::U32 => { let tf: IntFormatter<u32> = IntFormatter::new(); let max_w = tf.max_width(&to_display); tf.fmt_tensor(self, 1, max_w, summarize, &po, f)?; writeln!(f)?; } DType::I64 => { let tf: IntFormatter<i64> = IntFormatter::new(); let max_w = tf.max_width(&to_display); tf.fmt_tensor(self, 1, max_w, summarize, &po, f)?; writeln!(f)?; } DType::BF16 => { if let Ok(tf) = FloatFormatter::<bf16>::new(&to_display, &po) { let max_w = tf.max_width(&to_display); tf.fmt_tensor(self, 1, max_w, summarize, &po, f)?; writeln!(f)?; } } DType::F16 => { if let Ok(tf) = FloatFormatter::<f16>::new(&to_display, &po) { let max_w = tf.max_width(&to_display); tf.fmt_tensor(self, 1, max_w, summarize, &po, f)?; writeln!(f)?; } } DType::F64 => { if let Ok(tf) = FloatFormatter::<f64>::new(&to_display, &po) { let max_w = tf.max_width(&to_display); tf.fmt_tensor(self, 1, max_w, summarize, &po, f)?; writeln!(f)?; } } DType::F32 => { if let Ok(tf) = FloatFormatter::<f32>::new(&to_display, &po) { let max_w = tf.max_width(&to_display); tf.fmt_tensor(self, 1, max_w, summarize, &po, f)?; writeln!(f)?; } } }; let device_str = match self.device().location() { crate::DeviceLocation::Cpu => "".to_owned(), crate::DeviceLocation::Cuda { gpu_id } => { format!(", cuda:{}", gpu_id) } crate::DeviceLocation::Metal { gpu_id } => { format!(", metal:{}", gpu_id) } }; write!( f, "Tensor[{:?}, {}{}]", self.dims(), self.dtype().as_str(), device_str ) } }
candle/candle-core/src/display.rs/0
{ "file_path": "candle/candle-core/src/display.rs", "repo_id": "candle", "token_count": 9753 }
24
#![allow(unused)] use super::GgmlDType; use crate::{CudaDevice, CudaStorage, Error, Result}; pub struct QCudaStorage { dtype: GgmlDType, device: CudaDevice, } impl QCudaStorage { pub fn zeros(_: &CudaDevice, _: usize, _: GgmlDType) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } pub fn dtype(&self) -> GgmlDType { self.dtype } pub fn device(&self) -> &CudaDevice { &self.device } pub fn dequantize(&self, _elem_count: usize) -> Result<CudaStorage> { Err(Error::NotCompiledWithCudaSupport) } pub fn dequantize_f16(&self, _elem_count: usize) -> Result<CudaStorage> { Err(Error::NotCompiledWithCudaSupport) } pub fn quantize(&mut self, _src: &CudaStorage) -> Result<()> { Err(Error::NotCompiledWithCudaSupport) } pub fn storage_size_in_bytes(&self) -> usize { 0 } pub fn fwd( &self, _self_shape: &crate::Shape, _storage: &CudaStorage, _layout: &crate::Layout, ) -> Result<(CudaStorage, crate::Shape)> { Err(Error::NotCompiledWithCudaSupport) } } pub fn load_quantized<T: super::GgmlType + Send + Sync + 'static>( _device: &CudaDevice, _data: &[T], ) -> Result<super::QStorage> { Err(Error::NotCompiledWithCudaSupport) }
candle/candle-core/src/quantized/dummy_cuda.rs/0
{ "file_path": "candle/candle-core/src/quantized/dummy_cuda.rs", "repo_id": "candle", "token_count": 594 }
25
use crate::Layout; /// An iterator over offset position for items of an N-dimensional arrays stored in a /// flat buffer using some potential strides. #[derive(Debug)] pub struct StridedIndex<'a> { next_storage_index: Option<usize>, multi_index: Vec<usize>, dims: &'a [usize], stride: &'a [usize], } impl<'a> StridedIndex<'a> { pub(crate) fn new(dims: &'a [usize], stride: &'a [usize], start_offset: usize) -> Self { let elem_count: usize = dims.iter().product(); let next_storage_index = if elem_count == 0 { None } else { // This applies to the scalar case. Some(start_offset) }; StridedIndex { next_storage_index, multi_index: vec![0; dims.len()], dims, stride, } } pub(crate) fn from_layout(l: &'a Layout) -> Self { Self::new(l.dims(), l.stride(), l.start_offset()) } } impl<'a> Iterator for StridedIndex<'a> { type Item = usize; fn next(&mut self) -> Option<Self::Item> { let storage_index = match self.next_storage_index { None => return None, Some(storage_index) => storage_index, }; let mut updated = false; let mut next_storage_index = storage_index; for ((multi_i, max_i), stride_i) in self .multi_index .iter_mut() .zip(self.dims.iter()) .zip(self.stride.iter()) .rev() { let next_i = *multi_i + 1; if next_i < *max_i { *multi_i = next_i; updated = true; next_storage_index += stride_i; break; } else { next_storage_index -= *multi_i * stride_i; *multi_i = 0 } } self.next_storage_index = if updated { Some(next_storage_index) } else { None }; Some(storage_index) } } #[derive(Debug)] pub enum StridedBlocks<'a> { SingleBlock { start_offset: usize, len: usize, }, MultipleBlocks { block_start_index: StridedIndex<'a>, block_len: usize, }, }
candle/candle-core/src/strided_index.rs/0
{ "file_path": "candle/candle-core/src/strided_index.rs", "repo_id": "candle", "token_count": 1148 }
26
import torch from collections import OrderedDict # Write a trivial tensor to a pt file a= torch.tensor([[1,2,3,4], [5,6,7,8]]) o = OrderedDict() o["test"] = a # Write a trivial tensor to a pt file torch.save(o, "test.pt") ############################################################################################################ # Write a trivial tensor to a pt file with a key torch.save({"model_state_dict": o}, "test_with_key.pt") ############################################################################################################ # Create a tensor with fortran contiguous memory layout import numpy as np # Step 1: Create a 3D NumPy array with Fortran order using a range of numbers # For example, creating a 2x3x4 array array_fortran = np.asfortranarray(np.arange(1, 2*3*4 + 1).reshape(2, 3, 4)) # Verify the memory order print("Is Fortran contiguous (F order):", array_fortran.flags['F_CONTIGUOUS']) # Should be True print("Is C contiguous (C order):", array_fortran.flags['C_CONTIGUOUS']) # Should be False # Step 2: Convert the NumPy array to a PyTorch tensor tensor_fortran = torch.from_numpy(array_fortran) # Verify the tensor layout print("Tensor stride:", tensor_fortran.stride()) # Stride will reflect the Fortran memory layout # Step 3: Save the PyTorch tensor to a .pth file torch.save({"tensor_fortran": tensor_fortran}, 'fortran_tensor_3d.pth') print("3D Tensor saved with Fortran layout.")
candle/candle-core/tests/pth.py/0
{ "file_path": "candle/candle-core/tests/pth.py", "repo_id": "candle", "token_count": 441 }
27
//! The CIFAR-10 dataset. //! //! The files can be downloaded from the following page: //! <https://www.cs.toronto.edu/~kriz/cifar.html> //! The binary version of the dataset is used. use crate::vision::Dataset; use candle::{DType, Device, Error, Result, Tensor}; use hf_hub::{api::sync::Api, Repo, RepoType}; use parquet::file::reader::{FileReader, SerializedFileReader}; use std::fs::File; use std::io::{BufReader, Read}; const W: usize = 32; const H: usize = 32; const C: usize = 3; const BYTES_PER_IMAGE: usize = W * H * C + 1; const SAMPLES_PER_FILE: usize = 10000; fn read_file(filename: &std::path::Path) -> Result<(Tensor, Tensor)> { let mut buf_reader = BufReader::new(File::open(filename)?); let mut data = vec![0u8; SAMPLES_PER_FILE * BYTES_PER_IMAGE]; buf_reader.read_exact(&mut data)?; let mut images = vec![]; let mut labels = vec![]; for index in 0..SAMPLES_PER_FILE { let content_offset = BYTES_PER_IMAGE * index; labels.push(data[content_offset]); images.push(&data[1 + content_offset..content_offset + BYTES_PER_IMAGE]); } let images: Vec<u8> = images .iter() .copied() .flatten() .copied() .collect::<Vec<_>>(); let labels = Tensor::from_vec(labels, SAMPLES_PER_FILE, &Device::Cpu)?; let images = Tensor::from_vec(images, (SAMPLES_PER_FILE, C, H, W), &Device::Cpu)?; let images = (images.to_dtype(DType::F32)? / 255.)?; Ok((images, labels)) } pub fn load_dir<T: AsRef<std::path::Path>>(dir: T) -> Result<Dataset> { let dir = dir.as_ref(); let (test_images, test_labels) = read_file(&dir.join("test_batch.bin"))?; let train_images_and_labels = [ "data_batch_1.bin", "data_batch_2.bin", "data_batch_3.bin", "data_batch_4.bin", "data_batch_5.bin", ] .iter() .map(|x| read_file(&dir.join(x))) .collect::<Result<Vec<_>>>()?; let (train_images, train_labels): (Vec<_>, Vec<_>) = train_images_and_labels.into_iter().unzip(); Ok(Dataset { train_images: Tensor::cat(&train_images, 0)?, train_labels: Tensor::cat(&train_labels, 0)?, test_images, test_labels, labels: 10, }) } fn load_parquet(parquet: SerializedFileReader<std::fs::File>) -> Result<(Tensor, Tensor)> { let samples = parquet.metadata().file_metadata().num_rows() as usize; let mut buffer_images: Vec<u8> = Vec::with_capacity(samples * 1_024); let mut buffer_labels: Vec<u8> = Vec::with_capacity(samples); for row in parquet.into_iter().flatten() { for (_name, field) in row.get_column_iter() { if let parquet::record::Field::Group(subrow) = field { for (_name, field) in subrow.get_column_iter() { if let parquet::record::Field::Bytes(value) = field { let image = image::load_from_memory(value.data()).unwrap(); buffer_images.extend(image.to_rgb8().as_raw()); } } } else if let parquet::record::Field::Long(label) = field { buffer_labels.push(*label as u8); } } } let images = (Tensor::from_vec(buffer_images, (samples, 3, 32, 32), &Device::Cpu)? .to_dtype(DType::U8)? 
/ 255.)?; let labels = Tensor::from_vec(buffer_labels, (samples,), &Device::Cpu)?; Ok((images, labels)) } pub fn load() -> Result<Dataset> { let api = Api::new().map_err(|e| Error::Msg(format!("Api error: {e}")))?; let dataset_id = "cifar10".to_string(); let repo = Repo::with_revision( dataset_id, RepoType::Dataset, "refs/convert/parquet".to_string(), ); let repo = api.repo(repo); let test_parquet_filename = repo .get("plain_text/test/0000.parquet") .map_err(|e| Error::Msg(format!("Api error: {e}")))?; let train_parquet_filename = repo .get("plain_text/train/0000.parquet") .map_err(|e| Error::Msg(format!("Api error: {e}")))?; let test_parquet = SerializedFileReader::new(std::fs::File::open(test_parquet_filename)?) .map_err(|e| Error::Msg(format!("Parquet error: {e}")))?; let train_parquet = SerializedFileReader::new(std::fs::File::open(train_parquet_filename)?) .map_err(|e| Error::Msg(format!("Parquet error: {e}")))?; let (test_images, test_labels) = load_parquet(test_parquet)?; let (train_images, train_labels) = load_parquet(train_parquet)?; Ok(crate::vision::Dataset { train_images, train_labels, test_images, test_labels, labels: 10, }) }
candle/candle-datasets/src/vision/cifar.rs/0
{ "file_path": "candle/candle-datasets/src/vision/cifar.rs", "repo_id": "candle", "token_count": 2139 }
28
//! DINOv2: Learning Robust Visual Features without Supervision //! https://github.com/facebookresearch/dinov2 #[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use clap::Parser; use candle::{DType, IndexOp, D}; use candle_nn::{Module, VarBuilder}; use candle_transformers::models::dinov2; #[derive(Parser)] struct Args { #[arg(long)] model: Option<String>, #[arg(long)] image: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let image = candle_examples::imagenet::load_image224(args.image)?.to_device(&device)?; println!("loaded image {image:?}"); let model_file = match args.model { None => { let api = hf_hub::api::sync::Api::new()?; let api = api.model("lmz/candle-dino-v2".into()); api.get("dinov2_vits14.safetensors")? } Some(model) => model.into(), }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? }; let model = dinov2::vit_small(vb)?; println!("model built"); let logits = model.forward(&image.unsqueeze(0)?)?; let prs = candle_nn::ops::softmax(&logits, D::Minus1)? .i(0)? .to_vec1::<f32>()?; let mut prs = prs.iter().enumerate().collect::<Vec<_>>(); prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1)); for &(category_idx, pr) in prs.iter().take(5) { println!( "{:24}: {:.2}%", candle_examples::imagenet::CLASSES[category_idx], 100. * pr ); } Ok(()) }
candle/candle-examples/examples/dinov2/main.rs/0
{ "file_path": "candle/candle-examples/examples/dinov2/main.rs", "repo_id": "candle", "token_count": 791 }
29
# candle-fastvit [FastViT: A Fast Hybrid Vision Transformer using Structural Reparameterization](https://arxiv.org/abs/2303.14189). This candle implementation uses a pre-trained FastViT network for inference. The classification head has been trained on the ImageNet dataset and returns the probabilities for the top-5 classes. ## Running an example ``` $ cargo run --example fastvit --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg --which sa12 loaded image Tensor[dims 3, 256, 256; f32] model built mountain bike, all-terrain bike, off-roader: 52.67% bicycle-built-for-two, tandem bicycle, tandem: 7.93% unicycle, monocycle : 3.46% maillot : 1.32% crash helmet : 1.28% ```
candle/candle-examples/examples/fastvit/README.md/0
{ "file_path": "candle/candle-examples/examples/fastvit/README.md", "repo_id": "candle", "token_count": 258 }
30
// An implementation of LLaMA https://github.com/facebookresearch/llama // // This is based on nanoGPT in a similar way to: // https://github.com/Lightning-AI/lit-llama/blob/main/lit_llama/model.py // // The tokenizer config can be retrieved from: // https://huggingface.co/hf-internal-testing/llama-tokenizer/raw/main/tokenizer.json #[cfg(feature = "accelerate")] extern crate accelerate_src; #[cfg(feature = "mkl")] extern crate intel_mkl_src; use anyhow::{bail, Error as E, Result}; use clap::{Parser, ValueEnum}; use candle::{DType, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::{LogitsProcessor, Sampling}; use hf_hub::{api::sync::Api, Repo, RepoType}; use std::io::Write; use candle_transformers::models::llama as model; use model::{Llama, LlamaConfig}; const EOS_TOKEN: &str = "</s>"; const DEFAULT_PROMPT: &str = "My favorite theorem is "; #[derive(Clone, Debug, Copy, PartialEq, Eq, ValueEnum)] enum Which { V1, V2, V3, V31, V3Instruct, V31Instruct, #[value(name = "solar-10.7b")] Solar10_7B, #[value(name = "tiny-llama-1.1b-chat")] TinyLlama1_1BChat, } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// The temperature used to generate samples. #[arg(long, default_value_t = 0.8)] temperature: f64, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// Only sample among the top K samples. #[arg(long)] top_k: Option<usize>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(short = 'n', long, default_value_t = 10000)] sample_len: usize, /// Disable the key-value cache. #[arg(long)] no_kv_cache: bool, /// The initial prompt. #[arg(long)] prompt: Option<String>, /// Use different dtype than f16 #[arg(long)] dtype: Option<String>, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, #[arg(long)] model_id: Option<String>, #[arg(long)] revision: Option<String>, /// The model size to use. #[arg(long, default_value = "v3")] which: Which, #[arg(long)] use_flash_attn: bool, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. 
#[arg(long, default_value_t = 128)] repeat_last_n: usize, } fn main() -> Result<()> { use tokenizers::Tokenizer; use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; let device = candle_examples::device(args.cpu)?; let dtype = match args.dtype.as_deref() { Some("f16") => DType::F16, Some("bf16") => DType::BF16, Some("f32") => DType::F32, Some(dtype) => bail!("Unsupported dtype {dtype}"), None => DType::F16, }; let (llama, tokenizer_filename, mut cache, config) = { let api = Api::new()?; let model_id = args.model_id.unwrap_or_else(|| match args.which { Which::V1 => "Narsil/amall-7b".to_string(), Which::V2 => "meta-llama/Llama-2-7b-hf".to_string(), Which::V3 => "meta-llama/Meta-Llama-3-8B".to_string(), Which::V3Instruct => "meta-llama/Meta-Llama-3-8B-Instruct".to_string(), Which::V31 => "meta-llama/Meta-Llama-3.1-8B".to_string(), Which::V31Instruct => "meta-llama/Meta-Llama-3.1-8B-Instruct".to_string(), Which::Solar10_7B => "upstage/SOLAR-10.7B-v1.0".to_string(), Which::TinyLlama1_1BChat => "TinyLlama/TinyLlama-1.1B-Chat-v1.0".to_string(), }); println!("loading the model weights from {model_id}"); let revision = args.revision.unwrap_or("main".to_string()); let api = api.repo(Repo::with_revision(model_id, RepoType::Model, revision)); let tokenizer_filename = api.get("tokenizer.json")?; let config_filename = api.get("config.json")?; let config: LlamaConfig = serde_json::from_slice(&std::fs::read(config_filename)?)?; let config = config.into_config(args.use_flash_attn); let filenames = match args.which { Which::V1 | Which::V2 | Which::V3 | Which::V3Instruct | Which::V31 | Which::V31Instruct | Which::Solar10_7B => { candle_examples::hub_load_safetensors(&api, "model.safetensors.index.json")? } Which::TinyLlama1_1BChat => vec![api.get("model.safetensors")?], }; let cache = model::Cache::new(!args.no_kv_cache, dtype, &config, &device)?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? }; (Llama::load(vb, &config)?, tokenizer_filename, cache, config) }; let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let eos_token_id = config.eos_token_id.or_else(|| { tokenizer .token_to_id(EOS_TOKEN) .map(model::LlamaEosToks::Single) }); let prompt = args.prompt.as_ref().map_or(DEFAULT_PROMPT, |p| p.as_str()); let mut tokens = tokenizer .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); let mut tokenizer = candle_examples::token_output_stream::TokenOutputStream::new(tokenizer); println!("starting the inference loop"); print!("{prompt}"); let mut logits_processor = { let temperature = args.temperature; let sampling = if temperature <= 0. 
{ Sampling::ArgMax } else { match (args.top_k, args.top_p) { (None, None) => Sampling::All { temperature }, (Some(k), None) => Sampling::TopK { k, temperature }, (None, Some(p)) => Sampling::TopP { p, temperature }, (Some(k), Some(p)) => Sampling::TopKThenTopP { k, p, temperature }, } }; LogitsProcessor::from_sampling(args.seed, sampling) }; let mut start_gen = std::time::Instant::now(); let mut index_pos = 0; let mut token_generated = 0; for index in 0..args.sample_len { let (context_size, context_index) = if cache.use_kv_cache && index > 0 { (1, index_pos) } else { (tokens.len(), 0) }; if index == 1 { start_gen = std::time::Instant::now() } let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &device)?.unsqueeze(0)?; let logits = llama.forward(&input, context_index, &mut cache)?; let logits = logits.squeeze(0)?; let logits = if args.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(args.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, args.repeat_penalty, &tokens[start_at..], )? }; index_pos += ctxt.len(); let next_token = logits_processor.sample(&logits)?; token_generated += 1; tokens.push(next_token); match eos_token_id { Some(model::LlamaEosToks::Single(eos_tok_id)) if next_token == eos_tok_id => { break; } Some(model::LlamaEosToks::Multiple(ref eos_ids)) if eos_ids.contains(&next_token) => { break; } _ => (), } if let Some(t) = tokenizer.next_token(next_token)? { print!("{t}"); std::io::stdout().flush()?; } } if let Some(rest) = tokenizer.decode_rest().map_err(E::msg)? { print!("{rest}"); } let dt = start_gen.elapsed(); println!( "\n\n{} tokens generated ({} token/s)\n", token_generated, (token_generated - 1) as f64 / dt.as_secs_f64(), ); Ok(()) }
candle/candle-examples/examples/llama/main.rs/0
{ "file_path": "candle/candle-examples/examples/llama/main.rs", "repo_id": "candle", "token_count": 3929 }
31
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::Parser; use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::{ generation::LogitsProcessor, models::{moondream, quantized_moondream}, }; use tokenizers::Tokenizer; enum Model { Moondream(moondream::Model), Quantized(quantized_moondream::Model), } struct TextGeneration { model: Model, device: Device, tokenizer: Tokenizer, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, verbose_prompt: bool, } impl TextGeneration { #[allow(clippy::too_many_arguments)] fn new( model: Model, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, repeat_penalty: f32, repeat_last_n: usize, verbose_prompt: bool, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, temp, top_p); Self { model, tokenizer, logits_processor, repeat_penalty, repeat_last_n, verbose_prompt, device: device.clone(), } } fn run(&mut self, prompt: &str, image_embeds: &Tensor, sample_len: usize) -> Result<()> { use std::io::Write; println!("starting the inference loop"); let tokens = self.tokenizer.encode(prompt, true).map_err(E::msg)?; if tokens.is_empty() { anyhow::bail!("Empty prompts are not supported in the Moondream model.") } if self.verbose_prompt { for (token, id) in tokens.get_tokens().iter().zip(tokens.get_ids().iter()) { let token = token.replace('โ–', " ").replace("<0x0A>", "\n"); println!("{id:7} -> '{token}'"); } } let mut tokens = tokens.get_ids().to_vec(); let mut generated_tokens = 0usize; // Moondream tokenizer bos_token and eos_token is "<|endoftext|>" // https://huggingface.co/vikhyatk/moondream2/blob/main/special_tokens_map.json let special_token = match self.tokenizer.get_vocab(true).get("<|endoftext|>") { Some(token) => *token, None => anyhow::bail!("cannot find the special token"), }; let (bos_token, eos_token) = (special_token, special_token); let start_gen = std::time::Instant::now(); let mut load_t = std::time::Duration::from_secs_f64(0f64); for index in 0..sample_len { let context_size = if index > 0 { 1 } else { tokens.len() }; let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?; let logits = if index > 0 { match self.model { Model::Moondream(ref mut model) => model.text_model.forward(&input)?, Model::Quantized(ref mut model) => model.text_model.forward(&input)?, } } else { let bos_token = Tensor::new(&[bos_token], &self.device)?.unsqueeze(0)?; let logits = match self.model { Model::Moondream(ref mut model) => { model .text_model .forward_with_img(&bos_token, &input, image_embeds)? } Model::Quantized(ref mut model) => { model .text_model .forward_with_img(&bos_token, &input, image_embeds)? } }; load_t = start_gen.elapsed(); println!("load_t: {:?}", load_t); logits }; let logits = logits.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? 
}; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); generated_tokens += 1; if next_token == eos_token || tokens.ends_with(&[27, 10619, 29] /* <END> */) { break; } let token = self.tokenizer.decode(&[next_token], true).map_err(E::msg)?; print!("{token}"); std::io::stdout().flush()?; } let dt = start_gen.elapsed() - load_t; println!( "\ngenerated in {} seconds\n{generated_tokens} tokens generated ({:.2} token/s)", dt.as_secs_f64(), (generated_tokens - 1) as f64 / dt.as_secs_f64() ); Ok(()) } } #[derive(Parser)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// Display the token for the specified prompt. #[arg(long)] verbose_prompt: bool, #[arg(long)] prompt: String, #[arg(long)] image: String, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 0)] seed: u64, #[arg(long, default_value_t = 5000)] sample_len: usize, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.0)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. #[arg(long, default_value_t = 64)] repeat_last_n: usize, #[arg(long)] model_id: Option<String>, #[arg(long)] revision: Option<String>, #[arg(long)] quantized: bool, /// Use f16 precision for all the computations rather than f32. #[arg(long)] f16: bool, #[arg(long)] model_file: Option<String>, #[arg(long)] tokenizer_file: Option<String>, } /// Loads an image from disk using the image crate, this returns a tensor with shape /// (3, 378, 378). pub fn load_image<P: AsRef<std::path::Path>>(p: P) -> candle::Result<Tensor> { let img = image::ImageReader::open(p)? .decode() .map_err(candle::Error::wrap)? .resize_to_fill(378, 378, image::imageops::FilterType::Triangle); // Adjusted to 378x378 let img = img.to_rgb8(); let data = img.into_raw(); let data = Tensor::from_vec(data, (378, 378, 3), &Device::Cpu)?.permute((2, 0, 1))?; let mean = Tensor::new(&[0.5f32, 0.5, 0.5], &Device::Cpu)?.reshape((3, 1, 1))?; let std = Tensor::new(&[0.5f32, 0.5, 0.5], &Device::Cpu)?.reshape((3, 1, 1))?; (data.to_dtype(candle::DType::F32)? / 255.)? .broadcast_sub(&mean)? 
.broadcast_div(&std) } #[tokio::main] async fn main() -> anyhow::Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature.unwrap_or(0.), args.repeat_penalty, args.repeat_last_n ); let start = std::time::Instant::now(); let api = hf_hub::api::tokio::Api::new()?; let (model_id, revision) = match args.model_id { Some(model_id) => (model_id.to_string(), None), None => { if args.quantized { ("santiagomed/candle-moondream".to_string(), None) } else { ( "vikhyatk/moondream2".to_string(), Some("30c7cdf3fa6914f50bee3956694374143f5cc884"), ) } } }; let revision = match (args.revision, revision) { (Some(r), _) => r, (None, Some(r)) => r.to_string(), (None, None) => "main".to_string(), }; let repo = api.repo(hf_hub::Repo::with_revision( model_id, hf_hub::RepoType::Model, revision, )); let model_file = match args.model_file { Some(m) => m.into(), None => { if args.quantized { repo.get("model-q4_0.gguf").await? } else { repo.get("model.safetensors").await? } } }; let tokenizer = match args.tokenizer_file { Some(m) => m.into(), None => repo.get("tokenizer.json").await?, }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer).map_err(E::msg)?; let start = std::time::Instant::now(); let device = candle_examples::device(args.cpu)?; let config = moondream::Config::v2(); let dtype = if args.quantized { if args.f16 { anyhow::bail!("Quantized model does not support f16"); } DType::F32 } else if device.is_cuda() || args.f16 { DType::F16 } else { DType::F32 }; let model = if args.quantized { let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf( &model_file, &device, )?; let model = quantized_moondream::Model::new(&config, vb)?; Model::Quantized(model) } else { let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], dtype, &device)? }; let model = moondream::Model::new(&config, vb)?; Model::Moondream(model) }; println!("loaded the model in {:?}", start.elapsed()); let start = std::time::Instant::now(); let image = load_image(args.image)? .to_device(&device)? .to_dtype(dtype)?; let image_embeds = image.unsqueeze(0)?; let image_embeds = match model { Model::Moondream(ref m) => image_embeds.apply(m.vision_encoder())?, Model::Quantized(ref m) => image_embeds.apply(m.vision_encoder())?, }; println!( "loaded and encoded the image {image:?} in {:?}", start.elapsed() ); let prompt = format!("\n\nQuestion: {0}\n\nAnswer:", args.prompt); let mut pipeline = TextGeneration::new( model, tokenizer, args.seed, args.temperature, args.top_p, args.repeat_penalty, args.repeat_last_n, args.verbose_prompt, &device, ); pipeline.run(&prompt, &image_embeds, args.sample_len)?; Ok(()) }
candle/candle-examples/examples/moondream/main.rs/0
{ "file_path": "candle/candle-examples/examples/moondream/main.rs", "repo_id": "candle", "token_count": 5484 }
32
# candle-quantized-t5 ## Seq2Seq example This example uses a quantized version of the t5 model. ```bash $ cargo run --example quantized-t5 --release -- --prompt "translate to German: A beautiful candle." ... Eine schöne Kerze. ``` ## Generating Quantized weight files The weight file is automatically retrieved from the hub. It is also possible to generate quantized weight files from the original safetensors file by using the `tensor-tools` command line utility via: ```bash $ cargo run --bin tensor-tools --release -- quantize --quantization q6k PATH/TO/T5/model.safetensors /tmp/model.gguf ``` ## Using custom models To use a different model, specify the `model-id`. For example, for text editing, you can use quantized [CoEdit models](https://huggingface.co/jbochi/candle-coedit-quantized). ```bash $ cargo run --example quantized-t5 --release -- \ --model-id "jbochi/candle-coedit-quantized" \ --prompt "Make this text coherent: Their flight is weak. They run quickly through the tree canopy." \ --temperature 0 ... Although their flight is weak, they run quickly through the tree canopy. ``` By default, it will look for `model.gguf` and `config.json`, but you can specify custom local or remote `weight-file` and `config-file`s: ```bash cargo run --example quantized-t5 --release -- \ --model-id "jbochi/candle-coedit-quantized" \ --weight-file "model-xl.gguf" \ --config-file "config-xl.json" \ --prompt "Rewrite to make this easier to understand: Note that a storm surge is what forecasters consider a hurricane's most treacherous aspect." \ --temperature 0 ... Note that a storm surge is what forecasters consider a hurricane's most dangerous part. ``` ### [MADLAD-400](https://arxiv.org/abs/2309.04662) MADLAD-400 is a series of multilingual machine translation T5 models trained on 250 billion tokens covering over 450 languages using publicly available data. These models are competitive with significantly larger models. ```bash cargo run --example quantized-t5 --release -- \ --model-id "jbochi/madlad400-3b-mt" --weight-file "model-q4k.gguf" \ --prompt "<2de> How are you, my friend?" \ --temperature 0 ... Wie geht es dir, mein Freund? ```
candle/candle-examples/examples/quantized-t5/README.md/0
{ "file_path": "candle/candle-examples/examples/quantized-t5/README.md", "repo_id": "candle", "token_count": 683 }
33
#![allow(unused)] //! Vectorized version of the gym environment. use candle::{DType, Device, Result, Tensor}; use pyo3::prelude::*; use pyo3::types::PyDict; #[derive(Debug)] pub struct Step { pub obs: Tensor, pub reward: Tensor, pub is_done: Tensor, } pub struct VecGymEnv { env: PyObject, action_space: usize, observation_space: Vec<usize>, } fn w(res: PyErr) -> candle::Error { candle::Error::wrap(res) } impl VecGymEnv { pub fn new(name: &str, img_dir: Option<&str>, nprocesses: usize) -> Result<VecGymEnv> { Python::with_gil(|py| { let sys = py.import_bound("sys")?; let path = sys.getattr("path")?; let _ = path.call_method1( "append", ("candle-examples/examples/reinforcement-learning",), )?; let gym = py.import_bound("atari_wrappers")?; let make = gym.getattr("make")?; let env = make.call1((name, img_dir, nprocesses))?; let action_space = env.getattr("action_space")?; let action_space = action_space.getattr("n")?.extract()?; let observation_space = env.getattr("observation_space")?; let observation_space: Vec<usize> = observation_space.getattr("shape")?.extract()?; let observation_space = [vec![nprocesses].as_slice(), observation_space.as_slice()].concat(); Ok(VecGymEnv { env: env.into(), action_space, observation_space, }) }) .map_err(w) } pub fn reset(&self) -> Result<Tensor> { let obs = Python::with_gil(|py| { let obs = self.env.call_method0(py, "reset")?; let obs = obs.call_method0(py, "flatten")?; obs.extract::<Vec<f32>>(py) }) .map_err(w)?; Tensor::new(obs, &Device::Cpu)?.reshape(self.observation_space.as_slice()) } pub fn step(&self, action: Vec<usize>) -> Result<Step> { let (obs, reward, is_done) = Python::with_gil(|py| { let step = self.env.call_method_bound(py, "step", (action,), None)?; let step = step.bind(py); let obs = step.get_item(0)?.call_method("flatten", (), None)?; let obs_buffer = pyo3::buffer::PyBuffer::get_bound(&obs)?; let obs: Vec<u8> = obs_buffer.to_vec(py)?; let reward: Vec<f32> = step.get_item(1)?.extract()?; let is_done: Vec<f32> = step.get_item(2)?.extract()?; Ok((obs, reward, is_done)) }) .map_err(w)?; let obs = Tensor::from_vec(obs, self.observation_space.as_slice(), &Device::Cpu)? .to_dtype(DType::F32)?; let reward = Tensor::new(reward, &Device::Cpu)?; let is_done = Tensor::new(is_done, &Device::Cpu)?; Ok(Step { obs, reward, is_done, }) } pub fn action_space(&self) -> usize { self.action_space } pub fn observation_space(&self) -> &[usize] { &self.observation_space } }
candle/candle-examples/examples/reinforcement-learning/vec_gym_env.rs/0
{ "file_path": "candle/candle-examples/examples/reinforcement-learning/vec_gym_env.rs", "repo_id": "candle", "token_count": 1569 }
34
# candle-trocr `TrOCR` is a transformer OCR Model. In this example it is used to transcribe image text. See the associated [model card](https://huggingface.co/microsoft/trocr-base-printed) for details on the model itself. Supported models include: - `--which base`: small handwritten OCR model. - `--which large`: large handwritten OCR model. - `--which base-printed`: small printed OCR model. - `--which large-printed`: large printed OCR model. ## Running an example ```bash cargo run --example trocr --release -- --image candle-examples/examples/trocr/assets/trocr.png cargo run --example trocr --release -- --which large --image candle-examples/examples/trocr/assets/trocr.png cargo run --example trocr --release -- --which base-printed --image candle-examples/examples/trocr/assets/noto.png cargo run --example trocr --release -- --which large-printed --image candle-examples/examples/trocr/assets/noto.png ``` ### Outputs ``` industry , Mr. Brown commented icily . " Let us have a industry , " Mr. Brown commented icily . " Let us have a THE QUICK BROWN FOR JUMPS OVER THE LAY DOG THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG ```
candle/candle-examples/examples/trocr/readme.md/0
{ "file_path": "candle/candle-examples/examples/trocr/readme.md", "repo_id": "candle", "token_count": 360 }
35
#[cfg(feature = "accelerate")] extern crate accelerate_src; #[cfg(feature = "mkl")] extern crate intel_mkl_src; use candle_transformers::models::stable_diffusion; use candle_transformers::models::wuerstchen; use anyhow::{Error as E, Result}; use candle::{DType, Device, IndexOp, Tensor}; use clap::Parser; use tokenizers::Tokenizer; const PRIOR_GUIDANCE_SCALE: f64 = 4.0; const RESOLUTION_MULTIPLE: f64 = 42.67; const LATENT_DIM_SCALE: f64 = 10.67; const PRIOR_CIN: usize = 16; const DECODER_CIN: usize = 4; #[derive(Parser)] #[command(author, version, about, long_about = None)] struct Args { /// The prompt to be used for image generation. #[arg( long, default_value = "A very realistic photo of a rusty robot walking on a sandy beach" )] prompt: String, #[arg(long, default_value = "")] uncond_prompt: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, #[arg(long)] use_flash_attn: bool, /// The height in pixels of the generated image. #[arg(long)] height: Option<usize>, /// The width in pixels of the generated image. #[arg(long)] width: Option<usize>, /// The decoder weight file, in .safetensors format. #[arg(long, value_name = "FILE")] decoder_weights: Option<String>, /// The CLIP weight file, in .safetensors format. #[arg(long, value_name = "FILE")] clip_weights: Option<String>, /// The CLIP weight file used by the prior model, in .safetensors format. #[arg(long, value_name = "FILE")] prior_clip_weights: Option<String>, /// The prior weight file, in .safetensors format. #[arg(long, value_name = "FILE")] prior_weights: Option<String>, /// The VQGAN weight file, in .safetensors format. #[arg(long, value_name = "FILE")] vqgan_weights: Option<String>, #[arg(long, value_name = "FILE")] /// The file specifying the tokenizer to used for tokenization. tokenizer: Option<String>, #[arg(long, value_name = "FILE")] /// The file specifying the tokenizer to used for prior tokenization. prior_tokenizer: Option<String>, /// The number of samples to generate. #[arg(long, default_value_t = 1)] num_samples: i64, /// The name of the final image to generate. 
#[arg(long, value_name = "FILE", default_value = "sd_final.png")] final_image: String, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum ModelFile { Tokenizer, PriorTokenizer, Clip, PriorClip, Decoder, VqGan, Prior, } impl ModelFile { fn get(&self, filename: Option<String>) -> Result<std::path::PathBuf> { use hf_hub::api::sync::Api; match filename { Some(filename) => Ok(std::path::PathBuf::from(filename)), None => { let repo_main = "warp-ai/wuerstchen"; let repo_prior = "warp-ai/wuerstchen-prior"; let (repo, path) = match self { Self::Tokenizer => (repo_main, "tokenizer/tokenizer.json"), Self::PriorTokenizer => (repo_prior, "tokenizer/tokenizer.json"), Self::Clip => (repo_main, "text_encoder/model.safetensors"), Self::PriorClip => (repo_prior, "text_encoder/model.safetensors"), Self::Decoder => (repo_main, "decoder/diffusion_pytorch_model.safetensors"), Self::VqGan => (repo_main, "vqgan/diffusion_pytorch_model.safetensors"), Self::Prior => (repo_prior, "prior/diffusion_pytorch_model.safetensors"), }; let filename = Api::new()?.model(repo.to_string()).get(path)?; Ok(filename) } } } } fn output_filename( basename: &str, sample_idx: i64, num_samples: i64, timestep_idx: Option<usize>, ) -> String { let filename = if num_samples > 1 { match basename.rsplit_once('.') { None => format!("{basename}.{sample_idx}.png"), Some((filename_no_extension, extension)) => { format!("{filename_no_extension}.{sample_idx}.{extension}") } } } else { basename.to_string() }; match timestep_idx { None => filename, Some(timestep_idx) => match filename.rsplit_once('.') { None => format!("{filename}-{timestep_idx}.png"), Some((filename_no_extension, extension)) => { format!("{filename_no_extension}-{timestep_idx}.{extension}") } }, } } fn encode_prompt( prompt: &str, uncond_prompt: Option<&str>, tokenizer: std::path::PathBuf, clip_weights: std::path::PathBuf, clip_config: stable_diffusion::clip::Config, device: &Device, ) -> Result<Tensor> { let tokenizer = Tokenizer::from_file(tokenizer).map_err(E::msg)?; let pad_id = match &clip_config.pad_with { Some(padding) => *tokenizer.get_vocab(true).get(padding.as_str()).unwrap(), None => *tokenizer.get_vocab(true).get("<|endoftext|>").unwrap(), }; println!("Running with prompt \"{prompt}\"."); let mut tokens = tokenizer .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); let tokens_len = tokens.len(); while tokens.len() < clip_config.max_position_embeddings { tokens.push(pad_id) } let tokens = Tensor::new(tokens.as_slice(), device)?.unsqueeze(0)?; println!("Building the clip transformer."); let text_model = stable_diffusion::build_clip_transformer(&clip_config, clip_weights, device, DType::F32)?; let text_embeddings = text_model.forward_with_mask(&tokens, tokens_len - 1)?; match uncond_prompt { None => Ok(text_embeddings), Some(uncond_prompt) => { let mut uncond_tokens = tokenizer .encode(uncond_prompt, true) .map_err(E::msg)? 
.get_ids() .to_vec(); let uncond_tokens_len = uncond_tokens.len(); while uncond_tokens.len() < clip_config.max_position_embeddings { uncond_tokens.push(pad_id) } let uncond_tokens = Tensor::new(uncond_tokens.as_slice(), device)?.unsqueeze(0)?; let uncond_embeddings = text_model.forward_with_mask(&uncond_tokens, uncond_tokens_len - 1)?; let text_embeddings = Tensor::cat(&[text_embeddings, uncond_embeddings], 0)?; Ok(text_embeddings) } } } fn run(args: Args) -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let Args { prompt, uncond_prompt, cpu, height, width, tokenizer, final_image, num_samples, clip_weights, prior_weights, vqgan_weights, decoder_weights, tracing, .. } = args; let _guard = if tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; let device = candle_examples::device(cpu)?; let height = height.unwrap_or(1024); let width = width.unwrap_or(1024); let prior_text_embeddings = { let tokenizer = ModelFile::PriorTokenizer.get(args.prior_tokenizer)?; let weights = ModelFile::PriorClip.get(args.prior_clip_weights)?; encode_prompt( &prompt, Some(&uncond_prompt), tokenizer.clone(), weights, stable_diffusion::clip::Config::wuerstchen_prior(), &device, )? }; println!("generated prior text embeddings {prior_text_embeddings:?}"); let text_embeddings = { let tokenizer = ModelFile::Tokenizer.get(tokenizer)?; let weights = ModelFile::Clip.get(clip_weights)?; encode_prompt( &prompt, None, tokenizer.clone(), weights, stable_diffusion::clip::Config::wuerstchen(), &device, )? }; println!("generated text embeddings {text_embeddings:?}"); println!("Building the prior."); let b_size = 1; let image_embeddings = { // https://huggingface.co/warp-ai/wuerstchen-prior/blob/main/prior/config.json let latent_height = (height as f64 / RESOLUTION_MULTIPLE).ceil() as usize; let latent_width = (width as f64 / RESOLUTION_MULTIPLE).ceil() as usize; let mut latents = Tensor::randn( 0f32, 1f32, (b_size, PRIOR_CIN, latent_height, latent_width), &device, )?; let prior = { let file = ModelFile::Prior.get(prior_weights)?; let vb = unsafe { candle_nn::VarBuilder::from_mmaped_safetensors(&[file], DType::F32, &device)? }; wuerstchen::prior::WPrior::new( /* c_in */ PRIOR_CIN, /* c */ 1536, /* c_cond */ 1280, /* c_r */ 64, /* depth */ 32, /* nhead */ 24, args.use_flash_attn, vb, )? }; let prior_scheduler = wuerstchen::ddpm::DDPMWScheduler::new(60, Default::default())?; let timesteps = prior_scheduler.timesteps(); let timesteps = &timesteps[..timesteps.len() - 1]; println!("prior denoising"); for (index, &t) in timesteps.iter().enumerate() { let start_time = std::time::Instant::now(); let latent_model_input = Tensor::cat(&[&latents, &latents], 0)?; let ratio = (Tensor::ones(2, DType::F32, &device)? * t)?; let noise_pred = prior.forward(&latent_model_input, &ratio, &prior_text_embeddings)?; let noise_pred = noise_pred.chunk(2, 0)?; let (noise_pred_text, noise_pred_uncond) = (&noise_pred[0], &noise_pred[1]); let noise_pred = (noise_pred_uncond + ((noise_pred_text - noise_pred_uncond)? * PRIOR_GUIDANCE_SCALE)?)?; latents = prior_scheduler.step(&noise_pred, t, &latents)?; let dt = start_time.elapsed().as_secs_f32(); println!("step {}/{} done, {:.2}s", index + 1, timesteps.len(), dt); } ((latents * 42.)? - 1.)? 
}; println!("Building the vqgan."); let vqgan = { let file = ModelFile::VqGan.get(vqgan_weights)?; let vb = unsafe { candle_nn::VarBuilder::from_mmaped_safetensors(&[file], DType::F32, &device)? }; wuerstchen::paella_vq::PaellaVQ::new(vb)? }; println!("Building the decoder."); // https://huggingface.co/warp-ai/wuerstchen/blob/main/decoder/config.json let decoder = { let file = ModelFile::Decoder.get(decoder_weights)?; let vb = unsafe { candle_nn::VarBuilder::from_mmaped_safetensors(&[file], DType::F32, &device)? }; wuerstchen::diffnext::WDiffNeXt::new( /* c_in */ DECODER_CIN, /* c_out */ DECODER_CIN, /* c_r */ 64, /* c_cond */ 1024, /* clip_embd */ 1024, /* patch_size */ 2, args.use_flash_attn, vb, )? }; for idx in 0..num_samples { // https://huggingface.co/warp-ai/wuerstchen/blob/main/model_index.json let latent_height = (image_embeddings.dim(2)? as f64 * LATENT_DIM_SCALE) as usize; let latent_width = (image_embeddings.dim(3)? as f64 * LATENT_DIM_SCALE) as usize; let mut latents = Tensor::randn( 0f32, 1f32, (b_size, DECODER_CIN, latent_height, latent_width), &device, )?; println!("diffusion process with prior {image_embeddings:?}"); let scheduler = wuerstchen::ddpm::DDPMWScheduler::new(12, Default::default())?; let timesteps = scheduler.timesteps(); let timesteps = &timesteps[..timesteps.len() - 1]; for (index, &t) in timesteps.iter().enumerate() { let start_time = std::time::Instant::now(); let ratio = (Tensor::ones(1, DType::F32, &device)? * t)?; let noise_pred = decoder.forward(&latents, &ratio, &image_embeddings, Some(&text_embeddings))?; latents = scheduler.step(&noise_pred, t, &latents)?; let dt = start_time.elapsed().as_secs_f32(); println!("step {}/{} done, {:.2}s", index + 1, timesteps.len(), dt); } println!( "Generating the final image for sample {}/{}.", idx + 1, num_samples ); let image = vqgan.decode(&(&latents * 0.3764)?)?; let image = (image.clamp(0f32, 1f32)? * 255.)? .to_dtype(DType::U8)? .i(0)?; let image_filename = output_filename(&final_image, idx + 1, num_samples, None); candle_examples::save_image(&image, image_filename)? } Ok(()) } fn main() -> Result<()> { let args = Args::parse(); run(args) }
candle/candle-examples/examples/wuerstchen/main.rs/0
{ "file_path": "candle/candle-examples/examples/wuerstchen/main.rs", "repo_id": "candle", "token_count": 6372 }
36
pub const NAMES: [&str; 80] = [ "person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush", ];
candle/candle-examples/src/coco_classes.rs/0
{ "file_path": "candle/candle-examples/src/coco_classes.rs", "repo_id": "candle", "token_count": 648 }
37
// Copyright (c) 2023, Tri Dao. // Splitting the different head dimensions to different files to speed up compilation. // This file is auto-generated. See "generate_kernels.py" #include "flash_fwd_launch_template.h" template<> void run_mha_fwd_<cutlass::half_t, 128, true>(Flash_fwd_params &params, cudaStream_t stream) { run_mha_fwd_hdim128<cutlass::half_t, true>(params, stream); }
candle/candle-flash-attn/kernels/flash_fwd_hdim128_fp16_causal_sm80.cu/0
{ "file_path": "candle/candle-flash-attn/kernels/flash_fwd_hdim128_fp16_causal_sm80.cu", "repo_id": "candle", "token_count": 139 }
38
// This header is not specific to our application and you'll probably want
// something like this for any extension you're building. This includes the
// infrastructure needed to serialize descriptors that are used with the
// "opaque" parameter of the GPU custom call. In our example we'll use this
// parameter to pass the size of our problem.

#ifndef _GPU_OPS_KERNEL_HELPERS_H_
#define _GPU_OPS_KERNEL_HELPERS_H_

#include <cstdint>
#include <cstring> // memcpy
#include <stdexcept>
#include <string>
#include <type_traits>

#define JAX_APEX_WARP_SIZE 32

namespace gpu_ops {

// https://en.cppreference.com/w/cpp/numeric/bit_cast
template <class To, class From>
typename std::enable_if<sizeof(To) == sizeof(From) &&
                            std::is_trivially_copyable<From>::value &&
                            std::is_trivially_copyable<To>::value,
                        To>::type
bit_cast(const From &src) noexcept {
  static_assert(std::is_trivially_constructible<To>::value,
                "This implementation additionally requires destination type to "
                "be trivially constructible");

  To dst;
  memcpy(&dst, &src, sizeof(To));
  return dst;
}

template <typename T> std::string PackDescriptorAsString(const T &descriptor) {
  return std::string(bit_cast<const char *>(&descriptor), sizeof(T));
}

template <typename T>
const T *UnpackDescriptor(const char *opaque, std::size_t opaque_len) {
  if (opaque_len != sizeof(T)) {
    throw std::runtime_error("Invalid opaque object size");
  }
  return bit_cast<const T *>(opaque);
}

} // namespace gpu_ops

#endif
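// Illustrative usage sketch of the descriptor round-trip described above; the
// AffineDescriptor struct and its fields are hypothetical, only the
// PackDescriptorAsString/UnpackDescriptor helpers come from this header.
//
//   struct AffineDescriptor {
//     std::int64_t n;  // number of elements in the problem
//     float mul, add;  // affine coefficients
//   };
//
//   // Host side: serialize the descriptor into the custom call's "opaque" bytes.
//   AffineDescriptor d{1024, 1.5f, 0.5f};
//   std::string opaque = gpu_ops::PackDescriptorAsString(d);
//
//   // Launcher side: recover the descriptor (throws if the size does not match).
//   const AffineDescriptor *desc =
//       gpu_ops::UnpackDescriptor<AffineDescriptor>(opaque.data(), opaque.size());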
candle/candle-flash-attn/kernels/kernel_helpers.h/0
{ "file_path": "candle/candle-flash-attn/kernels/kernel_helpers.h", "repo_id": "candle", "token_count": 600 }
39
#include "cuda_utils.cuh" #include<stdint.h> #define AFFINE_OP(TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t numel, \ const size_t num_dims, \ const size_t *info, \ const TYPENAME *inp, \ TYPENAME *out, \ const TYPENAME mul, \ const TYPENAME add \ ) { \ const size_t *dims = info; \ const size_t *strides = info + num_dims; \ if (info == nullptr || is_contiguous(num_dims, dims, strides)) { \ for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \ TYPENAME x = inp ? inp[i] : out[i]; \ out[i] = x * mul + add; \ } \ } \ else { \ for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \ unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \ TYPENAME x = inp ? inp[strided_i] : out[i]; \ out[i] = x * mul + add; \ } \ } \ } \ #if __CUDA_ARCH__ >= 800 AFFINE_OP(__nv_bfloat16, affine_bf16) #endif #if __CUDA_ARCH__ >= 530 AFFINE_OP(__half, affine_f16) #endif AFFINE_OP(float, affine_f32) AFFINE_OP(double, affine_f64) AFFINE_OP(uint8_t, affine_u8) AFFINE_OP(uint32_t, affine_u32) AFFINE_OP(int64_t, affine_i64)
candle/candle-kernels/src/affine.cu/0
{ "file_path": "candle/candle-kernels/src/affine.cu", "repo_id": "candle", "token_count": 659 }
40
# candle-metal-kernels This crate contains Metal kernels used from candle.
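A minimal usage sketch is shown below. It mirrors the affine benchmark shipped alongside this crate; the `Kernels`/`call_affine` names, the `"affine_float"` kernel name, and the argument order are taken from that benchmark and may differ between versions.

```rust
use candle_metal_kernels::{call_affine, Kernels};
use metal::{Device, MTLResourceOptions};

fn main() {
    // Set up the Metal device, the kernel cache and a command buffer.
    let device = Device::system_default().unwrap();
    let kernels = Kernels::new();
    let command_queue = device.new_command_queue();
    let command_buffer = command_queue.new_command_buffer();

    // Input data and an output buffer of the same byte size.
    let v: Vec<f32> = vec![1.0, 2.0, 3.0, 4.0];
    let byte_len = (v.len() * std::mem::size_of::<f32>()) as u64;
    let options = MTLResourceOptions::StorageModeManaged;
    let input = device.new_buffer_with_data(
        v.as_ptr() as *const core::ffi::c_void,
        byte_len,
        options,
    );
    let mut output = device.new_buffer(byte_len, options);

    // Computes y = x * mul + add element-wise on the GPU.
    call_affine(
        &device,
        command_buffer,
        &kernels,
        "affine_float",
        v.len(),
        &input,
        &mut output,
        1.5,
        0.5,
    )
    .unwrap();
    command_buffer.commit();
    command_buffer.wait_until_completed();
}
```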
candle/candle-metal-kernels/README.md/0
{ "file_path": "candle/candle-metal-kernels/README.md", "repo_id": "candle", "token_count": 18 }
41
use candle_metal_kernels::{call_affine, Kernels}; use metal::objc::rc::autoreleasepool; use metal::{Device, MTLResourceOptions}; use rand; use std::any::type_name; use std::time::Instant; fn main() { let device = Device::system_default().unwrap(); let kernels = Kernels::new(); let f32_1k = (0..1000).map(|_| rand::random::<f32>()).collect::<Vec<_>>(); let f32_10k = (0..10000) .map(|_| rand::random::<f32>()) .collect::<Vec<_>>(); let f32_100k = (0..100000) .map(|_| rand::random::<f32>()) .collect::<Vec<_>>(); println!( "{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11} | {5: <11}", "dtype", "kernel", "size", "runs", "total time", "avg time" ); // f32 run_affine_bench(&device, &kernels, &f32_1k); run_affine_bench(&device, &kernels, &f32_10k); run_affine_bench(&device, &kernels, &f32_100k); } fn run_affine_bench<T: Clone>(device: &Device, kernels: &Kernels, v: &[T]) { let command_queue = device.new_command_queue(); let options = MTLResourceOptions::StorageModeManaged; let iterations = 10000; let input = device.new_buffer_with_data( v.as_ptr() as *const core::ffi::c_void, core::mem::size_of_val(v) as u64, options, ); let mut output = device.new_buffer(core::mem::size_of_val(v) as u64, options); let mul: f32 = 1.2345; let add: f32 = 2.3456; let total_time = autoreleasepool(|| { let command_buffer = command_queue.new_command_buffer(); let start = Instant::now(); for _ in 0..iterations { call_affine( &device, command_buffer, &kernels, "affine_float", v.len(), &input, &mut output, mul, add, ) .unwrap(); } command_buffer.commit(); command_buffer.wait_until_completed(); start.elapsed() }); println!( "{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11?} | {5: <11?}", type_name::<T>().split("::").last().unwrap(), "affine", v.len(), iterations, total_time, total_time / iterations ); }
candle/candle-metal-kernels/tmp/affine.rs/0
{ "file_path": "candle/candle-metal-kernels/tmp/affine.rs", "repo_id": "candle", "token_count": 1154 }
42
//! Encoding Utilities. (e.g., one-hot/cold encoding) use candle::{bail, DType, Result, Tensor, WithDType}; /// One-hot/cold encoding. /// /// Given an input tensor of indices, this function returns a tensor of the same shape as the input /// tensor with an additional dimension of the given depth size. The values in the returned tensor are /// all set to the `off_value` except for the positions represented by the indices, which are set to the `on_value`. /// /// This method returns a tensor with a rank that is one rank larger than the input tensor. /// /// As an example, the following tensor will be encoded to a one-hot matrix: /// /// `[[0i64, 2], [1, -1]]` /// /// with a depth of 4 will be encoded to: /// /// `[[[1, 0, 0, 0], [0, 0, 1, 0]], [[0, 1, 0, 0], [0, 0, 0, 0]]]` /// /// When the input tensor index has a value of -1, the corresponding one-hot vector will be ignored, /// resulting in a vector of values set to the `off_value`. /// /// /// This method supports one-cold encoding by setting `on_value` to `0` and `off_value` to `1`. /// By default `on_value` is `1` and `off_value` is `0`. /// /// Other encoding values can be used by setting `on_value` and `off_value` to the desired values. /// /// # Examples /// /// ## One-hot encoding /// /// ```rust /// use candle::{Shape, Tensor, Device}; /// use candle_nn::encoding::one_hot; /// /// let device = candle::Device::Cpu; /// /// let indices = Tensor::new(vec![vec![0i64, 2], vec![1, -1]], &device).unwrap(); /// let depth = 4; /// let one_hot = one_hot(indices, depth, 1f32, 0f32).unwrap(); /// /// let expected_matrix = [ /// [[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]], /// [[0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], /// ]; /// /// assert_eq!(one_hot.shape(), &Shape::from((2, 2, depth))); /// /// let matrix = one_hot.to_vec3::<f32>().unwrap(); /// /// assert_eq!(matrix, expected_matrix); ///``` /// ## One-cold Encoding /// /// ```rust /// use candle::{Shape, Tensor, Device}; /// use candle_nn::encoding::one_hot; /// /// /// let device = candle::Device::Cpu; /// let depth = 4; /// let indices = Tensor::new(vec![vec![0u8, 2], vec![1, 3]], &device).unwrap(); /// let one_cold = one_hot(indices, depth, 0u8, 1u8).unwrap(); /// /// let expected_matrix = [[[0, 1, 1, 1], [1, 1, 0, 1]], [[1, 0, 1, 1], [1, 1, 1, 0]]]; /// /// assert_eq!(one_cold.shape(), &Shape::from((2, 2, depth))); /// /// let matrix = one_cold.to_vec3::<u8>().unwrap(); /// /// assert_eq!(matrix, expected_matrix); /// ``` /// /// /// # Bails /// /// This method bails if: /// - One of the index value is less than -1. /// - One of the index value is greater than or equal to the depth value. /// - The input data type is not `U8`, `U32`, or `I64`. /// /// # API Design /// /// The api design for this method is loosely based on the [TensorFlow One-Hot](https://www.tensorflow.org/api_docs/python/tf/one_hot) method. 
pub fn one_hot<D: WithDType>( indices: Tensor, depth: usize, on_value: D, off_value: D, ) -> Result<Tensor> { let mut target_shape = indices.dims().to_vec(); target_shape.push(depth); let indices = indices.flatten_all()?; let mut out = vec![off_value; depth * indices.elem_count()]; match indices.dtype() { DType::U8 => { let indices = indices.to_vec1::<u8>()?; for (i, &index) in indices.iter().enumerate() { set_at_index(index, i * depth, depth, &mut out, on_value)?; } } DType::U32 => { let indices = indices.to_vec1::<u32>()?; for (i, &index) in indices.iter().enumerate() { set_at_index(index, i * depth, depth, &mut out, on_value)?; } } DType::I64 => { let indices = indices.to_vec1::<i64>()?; for (i, &index) in indices.iter().enumerate() { set_at_index(index, i * depth, depth, &mut out, on_value)?; } } dtype => { bail!("one_hot: unsupported data type {dtype:?}, expected U8, U32, or I64") } }; Tensor::from_vec(out, target_shape, indices.device()) } fn set_at_index<D: WithDType, I: Into<i64>>( value: I, offset: usize, depth: usize, v: &mut [D], on_value: D, ) -> Result<()> { let value = value.into(); // Skip for an entire row of off_values if value == -1 { return Ok(()); } if value < -1 { bail!( "one_hot: invalid negative index value {value}, expected a positive index value or -1" ); } let value = value as usize; if value >= depth { bail!("one_hot: index value {value} exceeds depth {depth}") } let idx = offset + value; if idx >= v.len() { bail!("one_hot: index out of bounds {idx}, len {}", v.len()); } v[idx] = on_value; Ok(()) }
candle/candle-nn/src/encoding.rs/0
{ "file_path": "candle/candle-nn/src/encoding.rs", "repo_id": "candle", "token_count": 2025 }
43
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Result; use candle::{test_utils, DType, Device, Tensor}; use candle_nn::{batch_norm, BatchNorm, BatchNormConfig, VarBuilder, VarMap}; /* The test below has been generated using the following PyTorch code: import torch torch.manual_seed(19551105) m = torch.nn.BatchNorm2d(5, affine=False) input = torch.randn(2, 5, 3, 4) output = m(input) print(input.flatten()) print(output.flatten()) print(m.running_mean) print(m.running_var) */ #[test] fn batch_norm_test() -> Result<()> { let running_mean = Tensor::zeros(5, DType::F32, &Device::Cpu)?; let running_var = Tensor::ones(5, DType::F32, &Device::Cpu)?; let bn = BatchNorm::new_no_bias(5, running_mean.clone(), running_var.clone(), 1e-8)?; let input: [f32; 120] = [ -0.7493, -1.0410, 1.6977, -0.6579, 1.7982, -0.0087, 0.2812, -0.1190, 0.2908, -0.5975, -0.0278, -0.2138, -1.3130, -1.6048, -2.2028, 0.9452, 0.4002, 0.0831, 1.0004, 0.1860, 0.5004, 0.5539, 0.9991, -0.2540, -0.0703, -0.3752, -0.1096, -0.2374, 1.0258, -2.2208, -0.0257, 0.6073, -1.1627, -0.0964, -1.9718, 1.6577, 0.1931, -0.3692, -0.8011, 0.9059, 0.4797, 0.6521, -0.0165, -0.6683, -0.4148, 2.0649, -0.8276, 1.7947, -0.2061, 0.5812, -1.3598, 1.6192, 1.0466, -0.4423, 0.4202, 0.1749, 0.6969, 0.2616, -0.0369, -1.4951, -0.0814, -0.1877, 0.0267, 0.6150, 0.2402, -1.1440, -2.0068, 0.6032, -2.6639, 0.8260, 0.1085, -0.1693, 1.2805, 0.7654, -0.4930, 0.3770, 1.1309, 0.2303, 0.2949, -0.2634, -0.5225, 0.4269, 0.6341, 1.5736, 0.9827, -1.2499, 0.3509, -1.6243, -0.8123, 0.7634, -0.3047, 0.0143, -0.4032, 0.0537, 0.7022, 0.8405, -1.2221, -1.6847, -0.0714, -0.1608, 0.5579, -1.5858, 0.4617, -0.6480, 0.1332, 0.0419, -0.9784, 0.4173, 1.2313, -1.9046, -0.1656, 0.1259, 0.0763, 1.4252, -0.9115, -0.1093, -0.3100, -0.6734, -1.4357, 0.9205, ]; let input = Tensor::new(&input, &Device::Cpu)?.reshape((2, 5, 3, 4))?; let output = bn.forward_train(&input)?; assert_eq!(output.dims(), &[2, 5, 3, 4]); let output = output.flatten_all()?; assert_eq!( test_utils::to_vec1_round(&output, 4)?, &[ -0.6391, -0.9414, 1.8965, -0.5444, 2.0007, 0.1283, 0.4287, 0.014, 0.4387, -0.4818, 0.1085, -0.0842, -1.6809, -2.0057, -2.6714, 0.8328, 0.2262, -0.1268, 0.8943, -0.0123, 0.3377, 0.3973, 0.8928, -0.5021, 0.0861, -0.2324, 0.0451, -0.0884, 1.2311, -2.1603, 0.1327, 0.7939, -1.055, 0.0589, -1.9002, 1.8912, 0.2918, -0.3253, -0.7993, 1.0741, 0.6063, 0.7955, 0.0617, -0.6536, -0.3754, 2.3461, -0.8284, 2.0495, -0.201, 0.6476, -1.4446, 1.7665, 1.1493, -0.4556, 0.4741, 0.2097, 0.7723, 0.3031, -0.0186, -1.5905, 0.053, -0.0572, 0.165, 0.7746, 0.3862, -1.0481, -1.9422, 0.7624, -2.6231, 0.9933, 0.2498, -0.0381, 1.2061, 0.6327, -0.7681, 0.2004, 1.0396, 0.037, 0.109, -0.5125, -0.8009, 0.2559, 0.4865, 1.5324, 1.1861, -1.1461, 0.5261, -1.5372, -0.689, 0.957, -0.1587, 0.1745, -0.2616, 0.2156, 0.8931, 1.0375, -1.2614, -1.7691, 0.0015, -0.0966, 0.6921, -1.6605, 0.5866, -0.6313, 0.226, 0.1258, -0.9939, 0.5378, 1.3484, -2.0319, -0.1574, 0.1568, 0.1034, 1.5574, -0.9614, -0.0967, -0.313, -0.7047, -1.5264, 1.0134 ] ); let bn2 = BatchNorm::new( 5, running_mean, running_var, Tensor::new(&[0.5f32], &Device::Cpu)?.broadcast_as(5)?, Tensor::new(&[-1.5f32], &Device::Cpu)?.broadcast_as(5)?, 1e-8, )?; let output2 = bn2.forward_train(&input)?; assert_eq!(output2.dims(), &[2, 5, 3, 4]); let output2 = output2.flatten_all()?; let diff2 = ((output2 - (output * 0.5)?)? 
+ 1.5)?.sqr()?; let sum_diff2 = diff2.sum_keepdim(0)?; assert_eq!(test_utils::to_vec1_round(&sum_diff2, 4)?, &[0f32]); assert_eq!( test_utils::to_vec1_round(bn.running_mean(), 4)?, &[-0.0133, 0.0197, -0.0153, -0.0073, -0.0020] ); assert_eq!( test_utils::to_vec1_round(bn.running_var(), 4)?, &[0.9972, 0.9842, 0.9956, 0.9866, 0.9898] ); Ok(()) } // This test makes sure that we can train a batch norm layer using a VarMap. #[test] fn train_batch_norm() -> Result<()> { let vm = VarMap::new(); let vb = VarBuilder::from_varmap(&vm, DType::F32, &Device::Cpu); let bn = batch_norm(1, BatchNormConfig::default(), vb)?; // Get a copy of the original mean to ensure it is being updated. let original_mean = bn.running_mean().detach().copy()?; let var_map_mean = { vm.data() .lock() .unwrap() .get("running_mean") .unwrap() .clone() }; // Ensure the var map mean is the same as the running mean. assert_eq!( test_utils::to_vec1_round(bn.running_mean(), 4)?, test_utils::to_vec1_round(var_map_mean.as_tensor(), 4)?, ); // Train with a something guaranteed to be different from the running mean. let mean_plus_one = { let one = original_mean.ones_like()?; original_mean.add(&one)?.reshape((1, 1))? }; bn.forward_train(&mean_plus_one)?; // Assert that the running mean has been updated. assert_ne!( test_utils::to_vec1_round(bn.running_mean(), 4)?, test_utils::to_vec1_round(&original_mean, 4)?, ); // Assert that the var map mean has been updated. assert_eq!( test_utils::to_vec1_round(bn.running_mean(), 4)?, test_utils::to_vec1_round(var_map_mean.as_tensor(), 4)?, ); Ok(()) }
candle/candle-nn/tests/batch_norm.rs/0
{ "file_path": "candle/candle-nn/tests/batch_norm.rs", "repo_id": "candle", "token_count": 3126 }
44
[package] name = "candle-pyo3" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true readme = "README.md" [lib] name = "candle" crate-type = ["cdylib"] [dependencies] accelerate-src = { workspace = true, optional = true } candle = { workspace = true } candle-nn = { workspace = true } candle-onnx = { workspace = true, optional = true } half = { workspace = true } intel-mkl-src = { workspace = true, optional = true } pyo3 = { version = "0.21.0", features = ["extension-module", "abi3-py38"] } [build-dependencies] pyo3-build-config = "0.21" [features] default = [] accelerate = ["dep:accelerate-src", "candle/accelerate"] cuda = ["candle/cuda"] mkl = ["dep:intel-mkl-src","candle/mkl"] onnx = ["dep:candle-onnx"]
candle/candle-pyo3/Cargo.toml/0
{ "file_path": "candle/candle-pyo3/Cargo.toml", "repo_id": "candle", "token_count": 315 }
45
from candle import Tensor, QTensor, DType from typing import ( Dict, Tuple, Any, Optional, Union, Iterator, Set, overload, Mapping, TypeVar, List, ) from collections import OrderedDict, namedtuple TensorLike = Union[Tensor, QTensor] T = TypeVar("T", bound="Module") class _IncompatibleKeys(namedtuple("IncompatibleKeys", ["missing_keys", "unexpected_keys"])): def __repr__(self): if not self.missing_keys and not self.unexpected_keys: return "<All keys matched successfully>" return super().__repr__() __str__ = __repr__ # see: https://github.com/pytorch/pytorch/blob/main/torch/nn/modules/module.py class Module: """ Pytorch like Module. Base class for all neural network modules. Your models should also subclass this class. """ _modules: Dict[str, Optional["Module"]] _buffers: Dict[str, Optional[TensorLike]] _non_persistent_buffers_set: Set[str] _quantizable_buffers: Set[str] _version: int = 1 def __init__(self, *args, **kwargs) -> None: """ Initializes internal Module state """ super().__setattr__("_modules", OrderedDict()) super().__setattr__("_buffers", OrderedDict()) super().__setattr__("_non_persistent_buffers_set", set()) super().__setattr__("_quantizable_buffers", set()) def __call__(self, *input): """ Call self as a function. """ return self.forward(*input) def forward(self, *input): """ Defines the computation performed at every call. Should be overridden by all subclasses. """ pass def children(self) -> Iterator["Module"]: r"""Returns an iterator over immediate children modules. Yields: Module: a child module """ for name, module in self.named_children(): yield module def named_children(self) -> Iterator[Tuple[str, "Module"]]: r"""Returns an iterator over immediate children modules, yielding both the name of the module as well as the module itself. Yields: (str, Module): Tuple containing a name and child module Example:: >>> for name, module in model.named_children(): >>> if name in ['conv4', 'conv5']: >>> print(module) """ memo = set() for name, module in self._modules.items(): if module is not None and module not in memo: memo.add(module) yield name, module def add_module(self, name: str, module: Optional["Module"]) -> None: r"""Adds a child module to the current module. The module can be accessed as an attribute using the given name. Args: name (str): name of the child module. The child module can be accessed from this module using the given name module (Module): child module to be added to the module. """ if not isinstance(module, Module) and module is not None: raise TypeError(f"{str(module)} is not a Module subclass") elif not isinstance(name, str): raise TypeError(f"module name should be a string. Got {name}") elif hasattr(self, name) and name not in self._modules: raise KeyError(f"attribute '{name}' already exists") elif "." in name: raise KeyError(f'module name can\'t contain ".", got: {name}') elif name == "": raise KeyError('module name can\'t be empty string ""') self._modules[name] = module def register_module(self, name: str, module: Optional["Module"]) -> None: r"""Alias for :func:`add_module`.""" self.add_module(name, module) def modules(self) -> Iterator["Module"]: r"""Returns an iterator over all modules in the network.""" for _, module in self.named_modules(): yield module def named_modules( self, memo: Optional[Set["Module"]] = None, prefix: str = "", remove_duplicate: bool = True, ): r"""Returns an iterator over all modules in the network, yielding both the name of the module as well as the module itself. 
Args: memo: a memo to store the set of modules already added to the result prefix: a prefix that will be added to the name of the module remove_duplicate: whether to remove the duplicated module instances in the result or not Yields: (str, Module): Tuple of name and module Note: Duplicate modules are returned only once. In the following example, ``l`` will be returned only once. """ if memo is None: memo = set() if self not in memo: if remove_duplicate: memo.add(self) yield prefix, self for name, module in self._modules.items(): if module is None: continue submodule_prefix = prefix + ("." if prefix else "") + name for m in module.named_modules(memo, submodule_prefix, remove_duplicate): yield m def buffers(self, recurse: bool = True) -> Iterator[TensorLike]: """ Returns an iterator over module buffers. """ for name, buf in self.named_buffers(recurse=recurse): yield buf def named_buffers( self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True ) -> Iterator[Tuple[str, TensorLike]]: r"""Returns an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself. Args: prefix (str): prefix to prepend to all buffer names. recurse (bool, optional): if True, then yields buffers of this module and all submodules. Otherwise, yields only buffers that are direct members of this module. Defaults to True. remove_duplicate (bool, optional): whether to remove the duplicated buffers in the result. Defaults to True. Yields: (str, Tensor): Tuple containing the name and buffer Example:: >>> for name, buf in self.named_buffers(): >>> if name in ['running_var']: >>> print(buf.size()) """ gen = self._named_members( lambda module: module._buffers.items(), prefix=prefix, recurse=recurse, remove_duplicate=remove_duplicate, ) yield from gen # The user can pass an optional arbitrary mappable object to `state_dict`, in which case `state_dict` returns # back that same object. But if they pass nothing, an `OrderedDict` is created and returned. T_destination = TypeVar("T_destination", bound=Dict[str, Any]) @overload def state_dict(self, *, destination: T_destination, prefix: str = ..., keep_vars: bool = ...) -> T_destination: ... @overload def state_dict(self, *, prefix: str = ..., keep_vars: bool = ...) -> Dict[str, Any]: ... def state_dict(self, *args, destination=None, prefix="", keep_vars=False): r"""Returns a dictionary containing references to the whole state of the module. Both parameters and persistent buffers (e.g. running averages) are included. Keys are corresponding parameter and buffer names. Parameters and buffers set to ``None`` are not included. .. note:: The returned object is a shallow copy. It contains references to the module's parameters and buffers. .. warning:: Currently ``state_dict()`` also accepts positional arguments for ``destination``, ``prefix`` and ``keep_vars`` in order. However, this is being deprecated and keyword arguments will be enforced in future releases. .. warning:: Please avoid the use of argument ``destination`` as it is not designed for end-users. Args: destination (dict, optional): If provided, the state of module will be updated into the dict and the same object is returned. Otherwise, an ``OrderedDict`` will be created and returned. Default: ``None``. prefix (str, optional): a prefix added to parameter and buffer names to compose the keys in state_dict. Default: ``''``. keep_vars (bool, optional): by default the :class:`~candle.Tensor` s returned in the state dict are detached from autograd. 
If it's set to ``True``, detaching will not be performed. Default: ``False``. Returns: dict: a dictionary containing a whole state of the module Example:: >>> # xdoctest: +SKIP("undefined vars") >>> module.state_dict().keys() ['bias', 'weight'] """ # TODO: Remove `args` and the parsing logic when BC allows. if len(args) > 0: if destination is None: destination = args[0] if len(args) > 1 and prefix == "": prefix = args[1] if len(args) > 2 and keep_vars is False: keep_vars = args[2] if destination is None: destination = OrderedDict() destination._metadata = OrderedDict() local_metadata = dict(version=self._version) if hasattr(destination, "_metadata"): destination._metadata[prefix[:-1]] = local_metadata self._save_to_state_dict(destination, prefix, keep_vars) for name, module in self._modules.items(): if module is not None: module.state_dict( destination=destination, prefix=prefix + name + ".", keep_vars=keep_vars, ) return destination def _save_to_state_dict(self, destination, prefix, keep_vars): r"""Saves module state to `destination` dictionary, containing a state of the module, but not its descendants. This is called on every submodule in :meth:`~candle.nn.Module.state_dict`. In rare cases, subclasses can achieve class-specific behavior by overriding this method with custom logic. Args: destination (dict): a dict where state will be stored prefix (str): the prefix for parameters and buffers used in this module """ for name, buf in self._buffers.items(): if buf is not None and name not in self._non_persistent_buffers_set: if isinstance(buf, Tensor): destination[prefix + name] = buf if keep_vars else buf.detach() else: destination[prefix + name] = buf def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True, assign: bool = False): r"""Copies parameters and buffers from :attr:`state_dict` into this module and its descendants. If :attr:`strict` is ``True``, then the keys of :attr:`state_dict` must exactly match the keys returned by this module's :meth:`~candle.nn.Module.state_dict` function. .. warning:: If :attr:`assign` is ``True`` the optimizer must be created after the call to :attr:`load_state_dict`. Args: state_dict (dict): a dict containing parameters and persistent buffers. strict (bool, optional): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~candle.nn.Module.state_dict` function. Default: ``True`` assign (bool, optional): whether to assign items in the state dictionary to their corresponding keys in the module instead of copying them inplace into the module's current parameters and buffers. When ``False``, the properties of the tensors in the current module are preserved while when ``True``, the properties of the Tensors in the state dict are preserved. Default: ``False`` Returns: ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: * **missing_keys** is a list of str containing the missing keys * **unexpected_keys** is a list of str containing the unexpected keys Note: If a parameter or buffer is registered as ``None`` and its corresponding key exists in :attr:`state_dict`, :meth:`load_state_dict` will raise a ``RuntimeError``. 
""" if not isinstance(state_dict, Mapping): raise TypeError(f"Expected state_dict to be dict-like, got {type(state_dict)}.") missing_keys: List[str] = [] unexpected_keys: List[str] = [] error_msgs: List[str] = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, "_metadata", None) state_dict = OrderedDict(state_dict) if metadata is not None: # mypy isn't aware that "_metadata" exists in state_dict state_dict._metadata = metadata # type: ignore[attr-defined] def load(module, local_state_dict, prefix=""): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) if assign: local_metadata["assign_to_params_buffers"] = assign module._load_from_state_dict( local_state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs, ) for name, child in module._modules.items(): if child is not None: child_prefix = prefix + name + "." child_state_dict = {k: v for k, v in local_state_dict.items() if k.startswith(child_prefix)} load(child, child_state_dict, child_prefix) load(self, state_dict) del load if strict: if len(unexpected_keys) > 0: error_msgs.insert( 0, "Unexpected key(s) in state_dict: {}. ".format(", ".join(f'"{k}"' for k in unexpected_keys)), ) if len(missing_keys) > 0: error_msgs.insert( 0, "Missing key(s) in state_dict: {}. ".format(", ".join(f'"{k}"' for k in missing_keys)), ) if len(error_msgs) > 0: raise RuntimeError( "Error(s) in loading state_dict for {}:\n\t{}".format(self.__class__.__name__, "\n\t".join(error_msgs)) ) return _IncompatibleKeys(missing_keys, unexpected_keys) def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, ): r"""Copies parameters and buffers from :attr:`state_dict` into only this module, but not its descendants. This is called on every submodule in :meth:`~candle.nn.Module.load_state_dict`. Metadata saved for this module in input :attr:`state_dict` is provided as :attr:`local_metadata`. For state dicts without metadata, :attr:`local_metadata` is empty. Subclasses can achieve class-specific backward compatible loading using the version number at `local_metadata.get("version", None)`. Additionally, :attr:`local_metadata` can also contain the key `assign_to_params_buffers` that indicates whether keys should be assigned their corresponding tensor in the state_dict. .. note:: :attr:`state_dict` is not the same object as the input :attr:`state_dict` to :meth:`~candle.nn.Module.load_state_dict`. So it can be modified. Args: state_dict (dict): a dict containing parameters and persistent buffers. prefix (str): the prefix for parameters and buffers used in this module local_metadata (dict): a dict containing the metadata for this module. 
See strict (bool): whether to strictly enforce that the keys in :attr:`state_dict` with :attr:`prefix` match the names of parameters and buffers in this module missing_keys (list of str): if ``strict=True``, add missing keys to this list unexpected_keys (list of str): if ``strict=True``, add unexpected keys to this list error_msgs (list of str): error messages should be added to this list, and will be reported together in :meth:`~candle.nn.Module.load_state_dict` """ persistent_buffers = {k: v for k, v in self._buffers.items() if k not in self._non_persistent_buffers_set} local_name_params = persistent_buffers.items() local_state = {k: v for k, v in local_name_params if v is not None} for name, param in local_state.items(): key = prefix + name if key in state_dict: input_param = state_dict[key] if not isinstance(input_param, (Tensor, QTensor)): error_msgs.append( f'While copying the parameter named "{key}", ' "expected Tensor-like object from checkpoint but " f"received {type(input_param)}" ) continue if input_param.shape != param.shape: # local shape should match the one in checkpoint error_msgs.append( "size mismatch for {}: copying a param with shape {} from checkpoint, " "the shape in current model is {}.".format(key, input_param.shape, param.shape) ) continue try: # Shape checks are already done above -> Just assign tensor setattr(self, name, input_param) except Exception as ex: error_msgs.append( f'While copying the parameter named "{key}", ' f"whose dimensions in the model are {param.shape} and " f"whose dimensions in the checkpoint are {input_param.shape}, " f"an exception occurred : {ex.args}." ) elif strict: missing_keys.append(key) if strict: for key in state_dict.keys(): if key.startswith(prefix): input_name = key[len(prefix) :] input_name = input_name.split(".", 1)[0] # get the name of param/buffer/child if input_name not in self._modules and input_name not in local_state: unexpected_keys.append(key) def _named_members(self, get_members_fn, prefix="", recurse=True, remove_duplicate: bool = True): r"""Helper method for yielding various names + members of modules.""" memo = set() modules = self.named_modules(prefix=prefix, remove_duplicate=remove_duplicate) if recurse else [(prefix, self)] for module_prefix, module in modules: members = get_members_fn(module) for k, v in members: if v is None or v in memo: continue if remove_duplicate: memo.add(v) name = module_prefix + ("." if module_prefix else "") + k yield name, v def _get_name(self): return self.__class__.__name__ def _apply(self, fn): for module in self.children(): module._apply(fn) for key, buf in self._buffers.items(): if buf is not None: self._buffers[key] = fn(buf) return self def __move_tensor_to_device(self, tensor: TensorLike, device: str): if isinstance(tensor, Tensor): return tensor.to_device(device) else: raise NotImplementedError("Cannot offload QTensor to cuda, yet!") def device(self) -> str: """ Gets the device of the module, by inspecting its tensors. """ tensor = next(self.buffers()) if isinstance(tensor, Tensor): return tensor.device else: # QTensors can only be on the CPU return "cpu" def cuda(self: T) -> T: r"""Moves all model parameters and buffers to the GPU. This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized. .. note:: This method modifies the module in-place. 
Returns: Module: self """ def to_cuda(t: TensorLike): return self.__move_tensor_to_device(t, "cuda") return self._apply(to_cuda) def cpu(self: T) -> T: r"""Moves all model parameters and buffers to the CPU. .. note:: This method modifies the module in-place. Returns: Module: self """ def to_cpu(t: TensorLike): return self.__move_tensor_to_device(t, "cpu") return self._apply(to_cpu) def __cast_tensor(self, tensor: TensorLike, dtype: Union[DType, str]): if isinstance(tensor, Tensor): return tensor.to_dtype(dtype) else: raise TypeError("candle.Module.to only accepts Tensor dtypes, but got desired dtype={}".format(dtype)) def type(self: T, dst_type: Union[DType, str]) -> T: r"""Casts all parameters and buffers to :attr:`dst_type`. .. note:: This method modifies the module in-place. Args: dst_type (type or string): the desired type Returns: Module: self """ def cast(t: TensorLike): return self.__cast_tensor(t, dst_type) return self._apply(cast) @overload def to( self: T, device: str = ..., dtype: Optional[Union[DType, str]] = ..., ) -> T: ... @overload def to(self: T, dtype: Union[DType, str]) -> T: ... def to(self, *args, **kwargs): r"""Moves and/or casts the parameters and buffers. This can be called as .. function:: to(device=None, dtype=None) :noindex: .. function:: to(dtype) :noindex: See below for examples. .. note:: This method modifies the module in-place. Args: device (:class:`candle.device`): the desired device of the parameters and buffers in this module dtype (:class:`candle.dtype`): the desired floating point dtype of the parameters and buffers in this module Returns: Module: self """ device = None dtype = None if args: for arg in args: # Assuming arg can be a string representing a device or a dtype if isinstance(arg, str): lower_arg = str(arg).lower() if lower_arg.startswith("cuda") or lower_arg == "cpu": device = lower_arg else: dtype = arg elif isinstance(arg, DType): dtype = str(arg) else: raise TypeError("Module.to() received an invalid combination of arguments. 
Got: {}".format(args)) if kwargs: device = kwargs.get("device", device) dtype = str(kwargs.get("dtype", dtype)) if device: device = device.lower() if dtype: dtype = dtype.lower() if dtype not in ["f32", "f16", "f64"]: raise TypeError( "candle.Module.to only accepts floating point" "dtypes, but got desired dtype={}".format(dtype) ) def convert(t): if dtype: t = self.__cast_tensor(t, dtype) if device: t = self.__move_tensor_to_device(t, device) return t return self._apply(convert) def __setattr__(self, __name: str, __value: Any) -> None: if isinstance(__value, Module): self._modules[__name] = __value elif isinstance(__value, QTensor): if __name in self._quantizable_buffers: type = __value.ggml_dtype.lower() if type in ["f32", "f16"]: # It is faster to just dequantize the tensor here and use the normal tensor operations dequant = __value.dequantize() if type == "f16": dequant = dequant.to_dtype("f16") self._buffers[__name] = dequant else: self._buffers[__name] = __value else: # We expect a normal tensor here => dequantize it self._buffers[__name] = __value.dequantize() elif isinstance(__value, Tensor): self._buffers[__name] = __value else: super().__setattr__(__name, __value) def __getattr__(self, __name: str) -> Any: if "_modules" in self.__dict__: modules = self.__dict__["_modules"] if __name in modules: return modules[__name] if "_buffers" in self.__dict__: tensors = self.__dict__["_buffers"] if __name in tensors: return tensors[__name] return super().__getattribute__(__name) def __delattr__(self, name): if name in self._buffers: del self._buffers[name] elif name in self._modules: del self._modules[name] else: super().__delattr__(name)
candle/candle-pyo3/py_src/candle/nn/module.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/nn/module.py", "repo_id": "candle", "token_count": 12028 }
46
import candle print(f"mkl: {candle.utils.has_mkl()}") print(f"accelerate: {candle.utils.has_accelerate()}") print(f"num-threads: {candle.utils.get_num_threads()}") print(f"cuda: {candle.utils.cuda_is_available()}") t = candle.Tensor(42.0) print(t) print(t.shape, t.rank, t.device) print(t + t) t = candle.Tensor([3.0, 1, 4, 1, 5, 9, 2, 6]) print(t) print(t + t) t = t.reshape([2, 4]) print(t.matmul(t.t())) print(t.to_dtype(candle.u8)) print(t.to_dtype("u8")) t = candle.randn((5, 3)) print(t) print(t.dtype) t = candle.randn((16, 256)) quant_t = t.quantize("q6k") dequant_t = quant_t.dequantize() diff2 = (t - dequant_t).sqr() print(diff2.mean_all())
candle/candle-pyo3/test.py/0
{ "file_path": "candle/candle-pyo3/test.py", "repo_id": "candle", "token_count": 340 }
47
use candle::{DType, Device, IndexOp, Result, Tensor, D}; use candle_nn::{embedding, linear_b as linear, Embedding, LayerNorm, Linear, Module, VarBuilder}; fn layer_norm(size: usize, eps: f64, vb: VarBuilder) -> Result<LayerNorm> { let weight = vb.get(size, "weight")?; let bias = vb.get(size, "bias")?; Ok(LayerNorm::new(weight, bias, eps)) } fn make_causal_mask(t: usize, device: &Device) -> Result<Tensor> { let mask: Vec<_> = (0..t) .flat_map(|i| (0..t).map(move |j| u8::from(j <= i))) .collect(); let mask = Tensor::from_slice(&mask, (t, t), device)?; Ok(mask) } #[derive(Debug)] pub struct Config { pub vocab_size: usize, // max_position_embeddings aka n_positions pub max_position_embeddings: usize, // num_hidden_layers aka n_layer pub num_hidden_layers: usize, // hidden_size aka n_embd pub hidden_size: usize, pub layer_norm_epsilon: f64, pub n_inner: Option<usize>, // num_attention_heads aka n_head pub num_attention_heads: usize, pub multi_query: bool, pub use_cache: bool, } impl Config { #[allow(dead_code)] pub fn starcoder_1b() -> Self { Self { vocab_size: 49152, max_position_embeddings: 8192, num_hidden_layers: 24, hidden_size: 2048, layer_norm_epsilon: 1e-5, n_inner: Some(8192), num_attention_heads: 16, multi_query: true, use_cache: true, } } #[allow(dead_code)] pub fn starcoder_3b() -> Self { Self { vocab_size: 49152, max_position_embeddings: 8192, num_hidden_layers: 36, hidden_size: 2816, layer_norm_epsilon: 1e-5, n_inner: Some(11264), num_attention_heads: 22, multi_query: true, use_cache: true, } } #[allow(dead_code)] pub fn starcoder_7b() -> Self { Self { vocab_size: 49152, max_position_embeddings: 8192, num_hidden_layers: 42, hidden_size: 4096, layer_norm_epsilon: 1e-5, n_inner: Some(16384), num_attention_heads: 32, multi_query: true, use_cache: true, } } #[allow(dead_code)] pub fn starcoder() -> Self { Self { vocab_size: 49152, max_position_embeddings: 8192, num_hidden_layers: 40, hidden_size: 6144, layer_norm_epsilon: 1e-5, n_inner: Some(24576), num_attention_heads: 48, multi_query: true, use_cache: true, } } } struct Attention { c_attn: Linear, c_proj: Linear, kv_cache: Option<Tensor>, use_cache: bool, embed_dim: usize, kv_dim: usize, num_heads: usize, head_dim: usize, multi_query: bool, } impl Attention { pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let hidden_size = cfg.hidden_size; let head_dim = hidden_size / cfg.num_attention_heads; let kv_heads = if cfg.multi_query { 1 } else { cfg.num_attention_heads }; let kv_dim = kv_heads * head_dim; let c_attn = linear(hidden_size, hidden_size + 2 * kv_dim, true, vb.pp("c_attn"))?; let c_proj = linear(hidden_size, hidden_size, true, vb.pp("c_proj"))?; Ok(Self { c_proj, c_attn, embed_dim: hidden_size, kv_cache: None, use_cache: cfg.use_cache, kv_dim, head_dim, num_heads: cfg.num_attention_heads, multi_query: cfg.multi_query, }) } fn attn( &self, query: &Tensor, key: &Tensor, value: &Tensor, attention_mask: &Tensor, ) -> Result<Tensor> { if query.dtype() != DType::F32 { // If we start supporting f16 models, we may need the upcasting scaling bits. 
// https://github.com/huggingface/transformers/blob/a0042379269bea9182c1f87e6b2eee4ba4c8cce8/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py#L133 candle::bail!("upcasting is not supported {:?}", query.dtype()) } let scale_factor = 1f64 / (self.head_dim as f64).sqrt(); let initial_query_shape = query.shape(); let key_len = key.dim(D::Minus1)?; let (query, key, attn_shape, attn_view) = if self.multi_query { let (b_sz, query_len, _) = query.dims3()?; let query = query.reshape((b_sz, query_len * self.num_heads, self.head_dim))?; let attn_shape = (b_sz, query_len, self.num_heads, key_len); let attn_view = (b_sz, query_len * self.num_heads, key_len); (query, key.clone(), attn_shape, attn_view) } else { let (b_sz, _num_heads, query_len, _head_dim) = query.dims4()?; let query = query.reshape((b_sz, query_len * self.num_heads, self.head_dim))?; let key = key.reshape((b_sz * self.num_heads, self.head_dim, key_len))?; let attn_shape = (b_sz, self.num_heads, query_len, key_len); let attn_view = (b_sz * self.num_heads, query_len, key_len); (query, key, attn_shape, attn_view) }; let attn_weights = (query.matmul(&key.contiguous()?)? * scale_factor)?.reshape(attn_shape)?; let attention_mask = attention_mask.broadcast_as(attn_shape)?; let mask_value = Tensor::new(f32::NEG_INFINITY, query.device())?.broadcast_as(attn_shape)?; let attn_weights = attention_mask.where_cond(&attn_weights, &mask_value)?; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; let value = value.contiguous()?; let attn_output = if self.multi_query { attn_weights .reshape(attn_view)? .matmul(&value)? .reshape(initial_query_shape)? } else { attn_weights.matmul(&value)? }; Ok(attn_output) } fn forward(&mut self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> { let qkv = self.c_attn.forward(hidden_states)?; let (query, key_value) = if self.multi_query { let query = qkv.i((.., .., ..self.embed_dim))?; let key_value = qkv.i((.., .., self.embed_dim..self.embed_dim + 2 * self.kv_dim))?; (query, key_value) } else { let mut dims = qkv.dims().to_vec(); dims.pop(); dims.push(self.embed_dim); dims.push(self.head_dim * 3); let qkv = qkv.reshape(dims)?.transpose(1, 2)?; let query = qkv.i((.., .., .., ..self.head_dim))?; let key_value = qkv.i((.., .., .., self.head_dim..3 * self.head_dim))?; (query, key_value) }; let mut key_value = key_value; if self.use_cache { if let Some(kv_cache) = &self.kv_cache { // TODO: we could trim the tensors to MAX_SEQ_LEN so that this would work for // arbitrarily large sizes. key_value = Tensor::cat(&[kv_cache, &key_value], D::Minus2)?.contiguous()?; } self.kv_cache = Some(key_value.clone()) } let key = key_value.narrow(D::Minus1, 0, self.head_dim)?; let value = key_value.narrow(D::Minus1, self.head_dim, self.head_dim)?; let attn_output = self.attn(&query, &key.t()?, &value, attention_mask)?; let attn_output = if self.multi_query { attn_output } else { attn_output .transpose(1, 2)? .reshape(hidden_states.shape())? 
}; let attn_output = self.c_proj.forward(&attn_output)?; Ok(attn_output) } } struct Mlp { c_fc: Linear, c_proj: Linear, } impl Mlp { fn load(inner_dim: usize, vb: VarBuilder, cfg: &Config) -> Result<Self> { let c_fc = linear(cfg.hidden_size, inner_dim, true, vb.pp("c_fc"))?; let c_proj = linear(inner_dim, cfg.hidden_size, true, vb.pp("c_proj"))?; Ok(Self { c_fc, c_proj }) } fn forward(&mut self, hidden_states: &Tensor) -> Result<Tensor> { let hidden_states = self.c_fc.forward(hidden_states)?.gelu()?; let hidden_states = self.c_proj.forward(&hidden_states)?; Ok(hidden_states) } } // TODO: Add cross-attention? struct Block { ln_1: LayerNorm, attn: Attention, ln_2: LayerNorm, mlp: Mlp, } impl Block { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let hidden_size = cfg.hidden_size; let inner_dim = cfg.n_inner.unwrap_or(4 * hidden_size); let ln_1 = layer_norm(hidden_size, cfg.layer_norm_epsilon, vb.pp("ln_1"))?; let attn = Attention::load(vb.pp("attn"), cfg)?; let ln_2 = layer_norm(hidden_size, cfg.layer_norm_epsilon, vb.pp("ln_2"))?; let mlp = Mlp::load(inner_dim, vb.pp("mlp"), cfg)?; Ok(Self { ln_1, attn, ln_2, mlp, }) } fn forward(&mut self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> { let residual = hidden_states; let hidden_states = self.ln_1.forward(hidden_states)?; let attn_outputs = self.attn.forward(&hidden_states, attention_mask)?; let hidden_states = (&attn_outputs + residual)?; let residual = &hidden_states; let hidden_states = self.ln_2.forward(&hidden_states)?; let hidden_states = self.mlp.forward(&hidden_states)?; let hidden_states = (&hidden_states + residual)?; Ok(hidden_states) } } pub struct GPTBigCode { wte: Embedding, wpe: Embedding, blocks: Vec<Block>, ln_f: LayerNorm, lm_head: Linear, bias: Tensor, config: Config, } impl GPTBigCode { pub fn config(&self) -> &Config { &self.config } pub fn load(vb: VarBuilder, cfg: Config) -> Result<Self> { let hidden_size = cfg.hidden_size; let vb_t = vb.pp("transformer"); let wte = embedding(cfg.vocab_size, hidden_size, vb_t.pp("wte"))?; let wpe = embedding(cfg.max_position_embeddings, hidden_size, vb_t.pp("wpe"))?; let blocks = (0..cfg.num_hidden_layers) .map(|i| Block::load(vb_t.pp(&format!("h.{i}")), &cfg)) .collect::<Result<Vec<_>>>()?; let ln_f = layer_norm(hidden_size, cfg.layer_norm_epsilon, vb_t.pp("ln_f"))?; let lm_head = linear(hidden_size, cfg.vocab_size, false, vb_t.pp("wte"))?; let bias = make_causal_mask(cfg.max_position_embeddings, vb.device())?; Ok(Self { wte, wpe, blocks, lm_head, ln_f, bias, config: cfg, }) } pub fn forward(&mut self, input_ids: &Tensor, past_len: usize) -> Result<Tensor> { let dev = input_ids.device(); let (b_sz, seq_len) = input_ids.dims2()?; let key_len = past_len + seq_len; let attention_mask = self.bias.i((past_len..key_len, ..key_len))?.unsqueeze(0)?; // MQA models: (batch_size, query_length, n_heads, key_length) // MHA models: (batch_size, n_heads, query_length, key_length) let seq_len_dim = if self.config.multi_query { 2 } else { 1 }; let attention_mask = attention_mask.unsqueeze(seq_len_dim)?; let position_ids = Tensor::arange(past_len as u32, (past_len + seq_len) as u32, dev)?; let position_ids = position_ids.unsqueeze(0)?.broadcast_as((b_sz, seq_len))?; let input_embeds = self.wte.forward(input_ids)?; let position_embeds = self.wpe.forward(&position_ids)?; let mut hidden_states = (&input_embeds + &position_embeds)?; for block in self.blocks.iter_mut() { hidden_states = block.forward(&hidden_states, &attention_mask)?; } let hidden_states = 
self.ln_f.forward(&hidden_states)?; let hidden_states = hidden_states .reshape((b_sz, seq_len, self.config.hidden_size))? .narrow(1, seq_len - 1, 1)?; let logits = self.lm_head.forward(&hidden_states)?.squeeze(1)?; Ok(logits) } }
candle/candle-transformers/src/models/bigcode.rs/0
{ "file_path": "candle/candle-transformers/src/models/bigcode.rs", "repo_id": "candle", "token_count": 6280 }
48
//! EfficientViT (MSRA) inference implementation based on timm.
//!
//! See "EfficientViT: Memory Efficient Vision Transformer with Cascaded Group Attention"
//! https://arxiv.org/abs/2305.07027
//! https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/efficientvit_msra.py

use candle::{Result, Tensor, D};
use candle_nn::{
    batch_norm, conv2d, conv2d_no_bias, linear, ops::sigmoid, ops::softmax, Conv2dConfig, Func,
    VarBuilder,
};

#[derive(Clone)]
pub struct Config {
    channels: [usize; 3],
    blocks: [usize; 3],
    heads: [usize; 3],
    kernels: [usize; 4],
}

impl Config {
    pub fn m0() -> Self {
        Self {
            channels: [64, 128, 192],
            blocks: [1, 2, 3],
            heads: [4, 4, 4],
            kernels: [5, 5, 5, 5],
        }
    }
    pub fn m1() -> Self {
        Self {
            channels: [128, 144, 192],
            blocks: [1, 2, 3],
            heads: [2, 3, 3],
            kernels: [7, 5, 3, 3],
        }
    }
    pub fn m2() -> Self {
        Self {
            channels: [128, 192, 224],
            blocks: [1, 2, 3],
            heads: [4, 3, 2],
            kernels: [7, 5, 3, 3],
        }
    }
    pub fn m3() -> Self {
        Self {
            channels: [128, 240, 320],
            blocks: [1, 2, 3],
            heads: [4, 3, 4],
            kernels: [5, 5, 5, 5],
        }
    }
    pub fn m4() -> Self {
        Self {
            channels: [128, 256, 384],
            blocks: [1, 2, 3],
            heads: [4, 4, 4],
            kernels: [7, 5, 3, 3],
        }
    }
    pub fn m5() -> Self {
        Self {
            channels: [192, 288, 384],
            blocks: [1, 3, 4],
            heads: [3, 3, 4],
            kernels: [7, 5, 3, 3],
        }
    }
}

fn efficientvit_stemblock(
    in_channels: usize,
    out_channels: usize,
    vb: VarBuilder,
) -> Result<Func<'static>> {
    let conv2d_cfg = Conv2dConfig {
        stride: 2,
        padding: 1,
        ..Default::default()
    };

    let bn = batch_norm(out_channels, 1e-5, vb.pp("bn"))?;
    let conv = conv2d_no_bias(in_channels, out_channels, 3, conv2d_cfg, vb.pp("conv"))?;

    Ok(Func::new(move |xs| {
        let xs = xs.apply(&conv)?.apply_t(&bn, false)?;
        Ok(xs)
    }))
}

fn efficientvit_stem(dim: usize, vb: VarBuilder) -> Result<Func<'static>> {
    let conv1 = efficientvit_stemblock(3, dim / 8, vb.pp("conv1"))?;
    let conv2 = efficientvit_stemblock(dim / 8, dim / 4, vb.pp("conv2"))?;
    let conv3 = efficientvit_stemblock(dim / 4, dim / 2, vb.pp("conv3"))?;
    let conv4 = efficientvit_stemblock(dim / 2, dim, vb.pp("conv4"))?;

    Ok(Func::new(move |xs| {
        let xs = xs
            .apply(&conv1)?
            .relu()?
            .apply(&conv2)?
            .relu()?
            .apply(&conv3)?
            .relu()?
.apply(&conv4)?; Ok(xs) })) } fn depthwise_conv( channels: usize, kernel: usize, stride: usize, padding: usize, vb: VarBuilder, ) -> Result<Func<'static>> { let conv2d_cfg = Conv2dConfig { stride, padding, groups: channels, ..Default::default() }; let bn = batch_norm(channels, 1e-5, vb.pp("bn"))?; let conv = conv2d_no_bias(channels, channels, kernel, conv2d_cfg, vb.pp("conv"))?; Ok(Func::new(move |xs| xs.apply(&conv)?.apply_t(&bn, false))) } fn pointwise_conv( in_channels: usize, out_channels: usize, vb: VarBuilder, ) -> Result<Func<'static>> { let conv2d_cfg = Conv2dConfig { ..Default::default() }; let bn = batch_norm(out_channels, 1e-5, vb.pp("bn"))?; let conv = conv2d_no_bias(in_channels, out_channels, 1, conv2d_cfg, vb.pp("conv"))?; Ok(Func::new(move |xs| xs.apply(&conv)?.apply_t(&bn, false))) } fn conv_mlp(in_channels: usize, out_channels: usize, vb: VarBuilder) -> Result<Func<'static>> { let pw1 = pointwise_conv(in_channels, out_channels, vb.pp("pw1"))?; let pw2 = pointwise_conv(out_channels, in_channels, vb.pp("pw2"))?; Ok(Func::new(move |xs| { let xs = xs.apply(&pw1)?.relu()?.apply(&pw2)?; Ok(xs) })) } // Fixed per-stage resolutions const RESOLUTIONS: [usize; 3] = [14, 7, 4]; // Attention block fn efficientvit_attn( cfg: &Config, stage: usize, in_channels: usize, vb: VarBuilder, ) -> Result<Func<'static>> { let cga = cascaded_group_attn(cfg, stage, in_channels, vb)?; Ok(Func::new(move |xs| { let mut xs = xs.clone(); let (b, c, h, w) = xs.dims4()?; let win_res = 7; // Fixed window resolution let pad_b = (win_res - h % win_res) % win_res; let pad_r = (win_res - w % win_res) % win_res; let ph = h + pad_b; let pw = w + pad_r; let nh = ph / win_res; let nw = pw / win_res; if RESOLUTIONS[stage] > win_res { xs = xs.permute((0, 2, 3, 1))?; xs = xs.pad_with_zeros(D::Minus1, 0, pad_r)?; xs = xs.pad_with_zeros(D::Minus2, 0, pad_b)?; xs = xs .reshape((b, nh, win_res, nw, win_res, c))? .transpose(2, 3)?; xs = xs .reshape((b * nh * nw, win_res, win_res, c))? .permute((0, 3, 1, 2))?; } xs = xs.apply(&cga)?; if RESOLUTIONS[stage] > win_res { xs = xs .permute((0, 2, 3, 1))? 
.reshape((b, nh, nw, win_res, win_res, c))?; xs = xs.transpose(2, 3)?.reshape((b, ph, pw, c))?; xs = xs.permute((0, 3, 1, 2))?; } Ok(xs) })) } // Cascaded group attention fn cascaded_group_attn( cfg: &Config, stage: usize, in_channels: usize, vb: VarBuilder, ) -> Result<Func<'static>> { let heads = cfg.heads[stage]; let key_dim = 16; let val_dim = in_channels / heads; let scale = (key_dim as f64).powf(-0.5); let mut dws = Vec::with_capacity(heads); let mut qkvs = Vec::with_capacity(heads); for i in 0..heads { dws.push(depthwise_conv( key_dim, cfg.kernels[i], 1, cfg.kernels[i] / 2, vb.pp(format!("dws.{i}")), )?); qkvs.push(pointwise_conv( in_channels / heads, in_channels / heads + 2 * key_dim, vb.pp(format!("qkvs.{i}")), )?); } let proj = pointwise_conv(in_channels, in_channels, vb.pp("proj.1"))?; Ok(Func::new(move |xs| { let (b, _, h, w) = xs.dims4()?; let feats_in = xs.chunk(heads, 1)?; let mut feats_out = Vec::with_capacity(heads); let mut feat = feats_in[0].clone(); for i in 0..heads { if i > 0 { feat = (&feat + &feats_in[i])?; } feat = feat.apply(&qkvs[i])?; let res = feat.reshape((b, (), h, w))?; let q = res.narrow(1, 0, key_dim)?; let k = res.narrow(1, key_dim, key_dim)?; let v = res.narrow(1, 2 * key_dim, val_dim)?; let q = q.apply(&dws[i])?; let q = q.flatten_from(2)?; let k = k.flatten_from(2)?; let v = v.flatten_from(2)?; let q = (q * scale)?; let att = q.transpose(D::Minus2, D::Minus1)?.matmul(&k)?; let att = softmax(&att, D::Minus1)?; feat = v.matmul(&att.transpose(D::Minus2, D::Minus1)?)?; feat = feat.reshape((b, val_dim, h, w))?; feats_out.push(feat.clone()); } let xs = Tensor::cat(&feats_out, 1)?; let xs = xs.relu()?.apply(&proj)?; Ok(xs) })) } // Used by the downsampling layer fn squeeze_and_excitation( in_channels: usize, squeeze_channels: usize, vb: VarBuilder, ) -> Result<Func<'static>> { let conv2d_cfg = Conv2dConfig { ..Default::default() }; let fc1 = conv2d(in_channels, squeeze_channels, 1, conv2d_cfg, vb.pp("fc1"))?; let fc2 = conv2d(squeeze_channels, in_channels, 1, conv2d_cfg, vb.pp("fc2"))?; Ok(Func::new(move |xs| { let residual = xs; let xs = xs.mean_keepdim(D::Minus2)?.mean_keepdim(D::Minus1)?; let xs = sigmoid(&xs.apply(&fc1)?.relu()?.apply(&fc2)?)?; residual.broadcast_mul(&xs) })) } // Used by the downsampling layer fn patchmerge(in_channels: usize, out_channels: usize, vb: VarBuilder) -> Result<Func<'static>> { let dim = in_channels; let hid_dim = in_channels * 4; let conv1 = pointwise_conv(dim, hid_dim, vb.pp("conv1"))?; let conv2 = depthwise_conv(hid_dim, 3, 2, 1, vb.pp("conv2"))?; let conv3 = pointwise_conv(hid_dim, out_channels, vb.pp("conv3"))?; let se = squeeze_and_excitation(hid_dim, hid_dim / 4, vb.pp("se"))?; Ok(Func::new(move |xs| { let xs = xs .apply(&conv1)? .relu()? .apply(&conv2)? .relu()? .apply(&se)? 
.apply(&conv3)?; Ok(xs) })) } // Used by the downsampling layer fn res(dim: usize, vb: VarBuilder) -> Result<Func<'static>> { let dw = depthwise_conv(dim, 3, 1, 1, vb.pp("0.m"))?; let mlp = conv_mlp(dim, dim * 2, vb.pp("1.m"))?; Ok(Func::new(move |xs| { let mut xs = xs.clone(); xs = (&xs + &xs.apply(&dw)?)?; xs = (&xs + &xs.apply(&mlp)?)?; Ok(xs) })) } // Downsampling fn efficientvit_downsample( in_channels: usize, out_channels: usize, vb: VarBuilder, ) -> Result<Func<'static>> { let res1 = res(in_channels, vb.pp("res1"))?; let res2 = res(out_channels, vb.pp("res2"))?; let patchmerge = patchmerge(in_channels, out_channels, vb.pp("patchmerge"))?; Ok(Func::new(move |xs| { let xs = xs.apply(&res1)?.apply(&patchmerge)?.apply(&res2)?; Ok(xs) })) } fn efficientvit_block( cfg: &Config, stage: usize, dim: usize, vb: VarBuilder, ) -> Result<Func<'static>> { let dw0 = depthwise_conv(dim, 3, 1, 1, vb.pp("dw0.m"))?; let dw1 = depthwise_conv(dim, 3, 1, 1, vb.pp("dw1.m"))?; let ffn0 = conv_mlp(dim, dim * 2, vb.pp("ffn0.m"))?; let ffn1 = conv_mlp(dim, dim * 2, vb.pp("ffn1.m"))?; let attn = efficientvit_attn(cfg, stage, dim, vb.pp("mixer.m.attn"))?; Ok(Func::new(move |xs| { let mut xs = xs.clone(); xs = (&xs + &xs.apply(&dw0)?)?; xs = (&xs + &xs.apply(&ffn0)?)?; xs = (&xs + &xs.apply(&attn)?)?; xs = (&xs + &xs.apply(&dw1)?)?; xs = (&xs + &xs.apply(&ffn1)?)?; Ok(xs) })) } // Each stage is made of blocks. There is a downsampling layer between stages. fn efficientvit_stage(cfg: &Config, stage: usize, vb: VarBuilder) -> Result<Func<'static>> { let nblocks = cfg.blocks[stage]; let mut blocks = Vec::with_capacity(nblocks + 1); let in_channels = if stage > 0 { cfg.channels[stage - 1] } else { cfg.channels[0] }; let out_channels = cfg.channels[stage]; if stage > 0 { blocks.push(efficientvit_downsample( in_channels, out_channels, vb.pp("downsample"), )?); } for i in 0..nblocks { blocks.push(efficientvit_block( cfg, stage, out_channels, vb.pp(format!("blocks.{i}")), )?); } Ok(Func::new(move |xs| { let mut xs = xs.clone(); for block in blocks.iter() { xs = xs.apply(block)? } Ok(xs) })) } // Classification head. fn efficientvit_head(outputs: usize, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> { let norm = batch_norm(outputs, 1e-6, vb.pp("bn"))?; let linear = linear(outputs, nclasses, vb.pp("linear"))?; Ok(Func::new(move |xs| { xs.apply_t(&norm, false)?.apply(&linear) })) } // Build a efficientvit model for a given configuration. fn efficientvit_model( config: &Config, nclasses: Option<usize>, vb: VarBuilder, ) -> Result<Func<'static>> { let cls = match nclasses { None => None, Some(nclasses) => { let outputs = config.channels[2]; let head = efficientvit_head(outputs, nclasses, vb.pp("head"))?; Some(head) } }; let stem_dim = config.channels[0]; let stem = efficientvit_stem(stem_dim, vb.pp("patch_embed"))?; let vb = vb.pp("stages"); let stage1 = efficientvit_stage(config, 0, vb.pp(0))?; let stage2 = efficientvit_stage(config, 1, vb.pp(1))?; let stage3 = efficientvit_stage(config, 2, vb.pp(2))?; Ok(Func::new(move |xs| { let xs = xs .apply(&stem)? .apply(&stage1)? .apply(&stage2)? .apply(&stage3)? .mean(D::Minus2)? .mean(D::Minus1)?; match &cls { None => Ok(xs), Some(cls) => xs.apply(cls), } })) } pub fn efficientvit(cfg: &Config, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> { efficientvit_model(cfg, Some(nclasses), vb) } pub fn efficientvit_no_final_layer(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> { efficientvit_model(cfg, None, vb) }
candle/candle-transformers/src/models/efficientvit.rs/0
{ "file_path": "candle/candle-transformers/src/models/efficientvit.rs", "repo_id": "candle", "token_count": 6985 }
49
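As a usage note for the EfficientViT builder in candle-transformers/src/models/efficientvit.rs above: a minimal inference sketch, assuming a converted safetensors checkpoint on disk and an `m0`-style variant constructor on `Config` (the checkpoint path and the `Config::m0()` name are assumptions for illustration, not taken from the file shown here).

// Hypothetical driver for the EfficientViT model defined above.
use candle::{DType, Device, Result, Tensor};
use candle_nn::{Module, VarBuilder};
use candle_transformers::models::efficientvit;

fn main() -> Result<()> {
    let device = Device::Cpu;
    // Assumed checkpoint path and variant constructor; adjust to a real conversion.
    let vb = unsafe {
        VarBuilder::from_mmaped_safetensors(&["efficientvit_m0.safetensors"], DType::F32, &device)?
    };
    let cfg = efficientvit::Config::m0(); // assumed variant constructor
    let model = efficientvit::efficientvit(&cfg, 1000, vb)?;
    // Dummy 224x224 RGB batch standing in for a real preprocessed image.
    let image = Tensor::zeros((1, 3, 224, 224), DType::F32, &device)?;
    let logits = model.forward(&image)?;
    println!("{:?}", logits.shape());
    Ok(())
}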
use byteorder::{LittleEndian, ReadBytesExt}; use candle::{DType, Device, IndexOp, Result, Shape, Tensor}; use candle_nn::VarBuilder; use super::llama2_c::Config; pub struct TransformerWeights { // token embedding table token_embedding_table: Tensor, // (vocab_size, dim) // weights for rmsnorms rms_att_weight: Tensor, // (layer, dim) rmsnorm weights rms_ffn_weight: Tensor, // (layer, dim) // weights for matmuls wq: Tensor, // (layer, dim, dim) wk: Tensor, // (layer, dim, dim) wv: Tensor, // (layer, dim, dim) wo: Tensor, // (layer, dim, dim) // weights for ffn w1: Tensor, // (layer, hidden_dim, dim) w2: Tensor, // (layer, dim, hidden_dim) w3: Tensor, // (layer, hidden_dim, dim) // final rmsnorm rms_final_weight: Tensor, // (dim,) // freq_cis for RoPE relatively positional embeddings freq_cis_real: Tensor, // (seq_len, head_size/2) freq_cis_imag: Tensor, // (seq_len, head_size/2) } fn read_i32<R: std::io::Read>(r: &mut R) -> Result<i32> { let mut buf = [0u8; 4]; r.read_exact(&mut buf)?; Ok(i32::from_le_bytes(buf)) } fn read_tensor<R: std::io::Read, S: Into<Shape>>( r: &mut R, shape: S, dev: &Device, ) -> Result<Tensor> { let shape = shape.into(); let mut data_t = vec![0f32; shape.elem_count()]; r.read_f32_into::<LittleEndian>(&mut data_t)?; let tensor = Tensor::from_vec(data_t, shape, dev)?; Ok(tensor) } impl Config { pub fn from_reader<R: std::io::Read>(r: &mut R) -> Result<Self> { let dim = read_i32(r)? as usize; let hidden_dim = read_i32(r)? as usize; let n_layers = read_i32(r)? as usize; let n_heads = read_i32(r)? as usize; let n_kv_heads = read_i32(r)? as usize; let vocab_size = read_i32(r)? as usize; let seq_len = read_i32(r)? as usize; Ok(Self { dim, hidden_dim, n_layers, n_heads, n_kv_heads, vocab_size, seq_len, norm_eps: 1e-5, }) } pub fn head_size(&self) -> usize { self.dim / self.n_heads } } impl TransformerWeights { pub fn from_reader<R: std::io::Read>(r: &mut R, c: &Config, dev: &Device) -> Result<Self> { let token_embedding_table = read_tensor(r, (c.vocab_size, c.dim), dev)?; let rms_att_weight = read_tensor(r, (c.n_layers, c.dim), dev)?; let wq = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let wk = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let wv = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let wo = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let rms_ffn_weight = read_tensor(r, (c.n_layers, c.dim), dev)?; let w1 = read_tensor(r, (c.n_layers, c.hidden_dim, c.dim), dev)?; let w2 = read_tensor(r, (c.n_layers, c.dim, c.hidden_dim), dev)?; let w3 = read_tensor(r, (c.n_layers, c.hidden_dim, c.dim), dev)?; let rms_final_weight = read_tensor(r, c.dim, dev)?; let head_size = c.head_size(); let freq_cis_real = read_tensor(r, (c.seq_len, head_size / 2), dev)?; let freq_cis_imag = read_tensor(r, (c.seq_len, head_size / 2), dev)?; Ok(Self { token_embedding_table, rms_att_weight, wq, wk, wv, wo, rms_ffn_weight, w1, w2, w3, rms_final_weight, freq_cis_real, freq_cis_imag, }) } pub fn var_builder(&self, cfg: &Config, device: &Device) -> Result<VarBuilder<'static>> { // TODO: As of 2023-08-04, gemm is slower than expected when multiplying a matrix of // size (1, k) with the transpose of a matrix of size (k, n) as it ends up transposing the // second matrix back. We detect this case here and as a temporary hack make the weight // matrix column major rather than row major. This ends up speeding up text generation from // 120 token/s to 220 token/s on a Ryzen 2600X. 
let tr = device.is_cpu() && !candle::utils::has_mkl(); let tr = |x: Tensor| if tr { x.t()?.contiguous()?.t() } else { Ok(x) }; let mut ws = std::collections::HashMap::new(); let mut insert = |name: &str, t: Tensor| { ws.insert(name.to_string(), t); }; insert("rot.freq_cis_real", self.freq_cis_real.clone()); insert("rot.freq_cis_imag", self.freq_cis_imag.clone()); insert( "model.embed_tokens.weight", self.token_embedding_table.clone(), ); insert("lm_head.weight", tr(self.token_embedding_table.clone())?); insert("model.norm.weight", self.rms_final_weight.clone()); for layer in 0..cfg.n_layers { ws.insert( format!("model.layers.{layer}.self_attn.q_proj.weight"), tr(self.wq.i(layer)?)?, ); ws.insert( format!("model.layers.{layer}.self_attn.k_proj.weight"), tr(self.wk.i(layer)?)?, ); ws.insert( format!("model.layers.{layer}.self_attn.v_proj.weight"), tr(self.wv.i(layer)?)?, ); ws.insert( format!("model.layers.{layer}.self_attn.o_proj.weight"), tr(self.wo.i(layer)?)?, ); ws.insert( format!("model.layers.{layer}.mlp.gate_proj.weight"), tr(self.w1.i(layer)?)?, ); ws.insert( format!("model.layers.{layer}.mlp.down_proj.weight"), tr(self.w2.i(layer)?)?, ); ws.insert( format!("model.layers.{layer}.mlp.up_proj.weight"), tr(self.w3.i(layer)?)?, ); ws.insert( format!("model.layers.{layer}.input_layernorm.weight"), self.rms_att_weight.i(layer)?, ); ws.insert( format!("model.layers.{layer}.post_attention_layernorm.weight"), self.rms_ffn_weight.i(layer)?, ); } let vb = VarBuilder::from_tensors(ws, DType::F32, device); Ok(vb) } }
candle/candle-transformers/src/models/llama2_c_weights.rs/0
{ "file_path": "candle/candle-transformers/src/models/llama2_c_weights.rs", "repo_id": "candle", "token_count": 3322 }
50
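A short sketch of how the llama2.c weight reader above is typically driven, assuming a llama2.c-style binary checkpoint on disk (the "stories15M.bin" name is a placeholder): the config header is parsed first, then the raw f32 tensors, and finally everything is exposed through a VarBuilder with transformers-style tensor names.

use candle::{Device, Result};
use candle_transformers::models::llama2_c::Config;
use candle_transformers::models::llama2_c_weights::TransformerWeights;

fn main() -> Result<()> {
    let device = Device::Cpu;
    // Placeholder path: any llama2.c export (config header followed by f32 weights).
    let mut file = std::fs::File::open("stories15M.bin")?;
    let config = Config::from_reader(&mut file)?;
    let weights = TransformerWeights::from_reader(&mut file, &config, &device)?;
    // Remaps everything to names like model.layers.N.self_attn.q_proj.weight.
    let vb = weights.var_builder(&config, &device)?;
    println!("head size: {}", config.head_size());
    let _ = vb; // hand this to the llama2_c model builder
    Ok(())
}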
//! MobileNet-v4 inference implementation based on timm. //! //! See "MobileNetV4 - Universal Models for the Mobile Ecosystem" //! https://arxiv.org/abs/2404.10518 //! //! https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/mobilenetv3.py use candle::{Result, Tensor, D}; use candle_nn::{ batch_norm, conv2d_no_bias, linear, ops::softmax, Activation, Conv2dConfig, Func, VarBuilder, }; #[derive(Clone, Debug)] enum BlockType { Convolutional { out_channels: usize, kernel: usize, stride: usize, }, UniversalBottleneck { out_channels: usize, start_kernel: usize, mid_kernel: usize, stride: usize, expand: usize, }, EdgeResidual { out_channels: usize, kernel: usize, stride: usize, expand: usize, }, Attention { out_channels: usize, heads: usize, kernel: usize, stride: usize, kv_dim: usize, kv_stride: usize, }, } #[derive(Clone, Debug)] pub struct Config { stem_dim: usize, activation: Activation, stages: [Vec<BlockType>; 5], } #[rustfmt::skip] impl Config { pub fn small() -> Self { Self { stem_dim: 32, activation: Activation::Relu, stages: [ vec![ BlockType::Convolutional { out_channels: 32, kernel: 3, stride: 2}, BlockType::Convolutional { out_channels: 32, kernel: 1, stride: 1}, ], vec![ BlockType::Convolutional { out_channels: 96, kernel: 3, stride: 2}, BlockType::Convolutional { out_channels: 64, kernel: 1, stride: 1}, ], vec![ BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 5, mid_kernel: 5, stride: 2, expand: 3}, BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 0, mid_kernel: 3, stride: 1, expand: 2}, BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 0, mid_kernel: 3, stride: 1, expand: 2}, BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 0, mid_kernel: 3, stride: 1, expand: 2}, BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 0, mid_kernel: 3, stride: 1, expand: 2}, BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4}, ], vec![ BlockType::UniversalBottleneck { out_channels: 128, start_kernel: 3, mid_kernel: 3, stride: 2, expand: 6}, BlockType::UniversalBottleneck { out_channels: 128, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 128, start_kernel: 0, mid_kernel: 5, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 128, start_kernel: 0, mid_kernel: 5, stride: 1, expand: 3}, BlockType::UniversalBottleneck { out_channels: 128, start_kernel: 0, mid_kernel: 3, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 128, start_kernel: 0, mid_kernel: 3, stride: 1, expand: 4}, ], vec![ BlockType::Convolutional { out_channels: 960, kernel: 1, stride: 1}, ], ], } } pub fn medium() -> Self { Self { stem_dim: 32, activation: Activation::Relu, stages: [ vec![ BlockType::EdgeResidual { out_channels: 48, kernel: 3, stride: 2, expand: 4}, ], vec![ BlockType::UniversalBottleneck { out_channels: 80, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 4}, BlockType::UniversalBottleneck { out_channels: 80, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 2}, ], vec![ BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 6}, BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 5, stride: 1, 
expand: 4}, BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 2}, BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4}, ], vec![ BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 5, stride: 2, expand: 6}, BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 2}, BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 2}, ], vec![ BlockType::Convolutional { out_channels: 960, kernel: 1, stride: 1}, ], ], } } pub fn hybrid_medium() -> Self { Self { stem_dim: 32, activation: Activation::Relu, stages: [ vec![ BlockType::EdgeResidual { out_channels: 48, kernel: 3, stride: 2, expand: 4}, ], vec![ BlockType::UniversalBottleneck { out_channels: 80, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 4}, BlockType::UniversalBottleneck { out_channels: 80, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 2}, ], vec![ BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 6}, BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 2}, BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4}, BlockType::Attention { out_channels: 160, heads: 4, kernel: 3, stride: 1, kv_stride:2, kv_dim: 64}, BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4}, BlockType::Attention { out_channels: 160, heads: 4, kernel: 3, stride: 1, kv_stride:2, kv_dim: 64}, BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4}, BlockType::Attention { out_channels: 160, heads: 4, kernel: 3, stride: 1, kv_stride:2, kv_dim: 64}, BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4}, BlockType::Attention { out_channels: 160, heads: 4, kernel: 3, stride: 1, kv_stride:2, kv_dim: 64}, BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4}, ], vec![ BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 5, stride: 2, expand: 6}, BlockType::UniversalBottleneck { out_channels: 256, 
start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 2}, BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 2}, BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 2}, BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 4}, BlockType::Attention { out_channels: 256, heads: 4, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64}, BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4}, BlockType::Attention { out_channels: 256, heads: 4, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64}, BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4}, BlockType::Attention { out_channels: 256, heads: 4, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64}, BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4}, BlockType::Attention { out_channels: 256, heads: 4, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64}, BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4}, ], vec![ BlockType::Convolutional { out_channels: 960, kernel: 1, stride: 1}, ], ], } } pub fn large() -> Self { Self { stem_dim: 24, activation: Activation::Relu, stages: [ vec![ BlockType::EdgeResidual { out_channels: 48, kernel: 3, stride: 2, expand: 4}, ], vec![ BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 4}, BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4}, ], vec![ BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 4}, BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4}, ], vec![ BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 2, expand: 4}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4}, BlockType::UniversalBottleneck { 
out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4}, ], vec![ BlockType::Convolutional { out_channels: 960, kernel: 1, stride: 1}, ], ], } } pub fn hybrid_large() -> Self { Self { stem_dim: 24, activation: Activation::Gelu, stages: [ vec![ BlockType::EdgeResidual { out_channels: 48, kernel: 3, stride: 2, expand: 4}, ], vec![ BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 4}, BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4}, ], vec![ BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 4}, BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4}, BlockType::Attention { out_channels: 192, heads: 8, kernel: 3, stride: 1, kv_stride:2, kv_dim: 48}, BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4}, BlockType::Attention { out_channels: 192, heads: 8, kernel: 3, stride: 1, kv_stride:2, kv_dim: 48}, BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4}, BlockType::Attention { out_channels: 192, heads: 8, kernel: 3, stride: 1, kv_stride:2, kv_dim: 48}, BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4}, BlockType::Attention { out_channels: 192, heads: 8, kernel: 3, stride: 1, kv_stride:2, kv_dim: 48}, BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4}, ], vec![ BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 2, expand: 4}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4}, 
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4}, BlockType::Attention { out_channels: 512, heads: 8, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4}, BlockType::Attention { out_channels: 512, heads: 8, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4}, BlockType::Attention { out_channels: 512, heads: 8, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4}, BlockType::Attention { out_channels: 512, heads: 8, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64}, BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4}, ], vec![ BlockType::Convolutional { out_channels: 960, kernel: 1, stride: 1}, ], ], } } } fn depthwise_conv( channels: usize, kernel: usize, stride: usize, padding: usize, vb: VarBuilder, ) -> Result<Func<'static>> { let conv2d_cfg = Conv2dConfig { stride, padding, groups: channels, ..Default::default() }; let bn = batch_norm(channels, 1e-5, vb.pp("bn"))?; let conv = conv2d_no_bias(channels, channels, kernel, conv2d_cfg, vb.pp("conv"))?; Ok(Func::new(move |xs| xs.apply(&conv)?.apply_t(&bn, false))) } fn pointwise_conv( in_channels: usize, out_channels: usize, vb: VarBuilder, ) -> Result<Func<'static>> { let conv2d_cfg = Conv2dConfig { ..Default::default() }; let bn = batch_norm(out_channels, 1e-5, vb.pp("bn"))?; let conv = conv2d_no_bias(in_channels, out_channels, 1, conv2d_cfg, vb.pp("conv"))?; Ok(Func::new(move |xs| xs.apply(&conv)?.apply_t(&bn, false))) } //Universal block that uses two pointwise convolutions and all combinations of two depthwise convolutions. 
#[allow(clippy::too_many_arguments)] fn universal_inverted_bottleneck_block( cfg: &Config, in_channels: usize, out_channels: usize, expand: usize, start_kernel: usize, mid_kernel: usize, stride: usize, vb: VarBuilder, ) -> Result<Func<'static>> { let act = cfg.activation; let skip_connection = (in_channels == out_channels) && (stride == 1); let dw_start_stride = if mid_kernel > 0 { 1 } else { stride }; let dw_start = depthwise_conv( in_channels, start_kernel, dw_start_stride, start_kernel / 2, vb.pp("dw_start"), ); let pw_exp = pointwise_conv(in_channels, in_channels * expand, vb.pp("pw_exp"))?; let dw_mid = depthwise_conv( in_channels * expand, mid_kernel, stride, mid_kernel / 2, vb.pp("dw_mid"), ); let pw_proj = pointwise_conv(in_channels * expand, out_channels, vb.pp("pw_proj"))?; let gamma = vb.get(out_channels, "layer_scale.gamma"); Ok(Func::new(move |xs| { let residual = xs.clone(); let mut xs = xs.clone(); if let Ok(f) = &dw_start { xs = xs.apply(f)?; } xs = xs.apply(&pw_exp)?.apply(&act)?; if let Ok(f) = &dw_mid { xs = xs.apply(f)?.apply(&act)?; } xs = xs.apply(&pw_proj)?; if let Ok(g) = &gamma { xs = xs.broadcast_mul(&g.reshape((1, (), 1, 1))?)?; }; if skip_connection { xs = (xs + residual)?; } Ok(xs) })) } // Convolutional block including norm and activation. fn conv_block( cfg: &Config, in_channels: usize, out_channels: usize, kernel: usize, stride: usize, vb: VarBuilder, ) -> Result<Func<'static>> { let conv2d_cfg = Conv2dConfig { stride, padding: kernel / 2, ..Default::default() }; let act = cfg.activation; let bn = batch_norm(out_channels, 1e-5, vb.pp("bn1"))?; let conv = conv2d_no_bias(in_channels, out_channels, kernel, conv2d_cfg, vb.pp("conv"))?; Ok(Func::new(move |xs| { xs.apply(&conv)?.apply_t(&bn, false)?.apply(&act) })) } fn edge_residual_block( cfg: &Config, in_channels: usize, out_channels: usize, kernel: usize, stride: usize, expand: usize, vb: VarBuilder, ) -> Result<Func<'static>> { let conv_exp_cfg = Conv2dConfig { stride, padding: kernel / 2, ..Default::default() }; let conv_pwl_cfg = Conv2dConfig { ..Default::default() }; let act = cfg.activation; let mid_channels = in_channels * expand; let conv_exp = conv2d_no_bias( in_channels, mid_channels, kernel, conv_exp_cfg, vb.pp("conv_exp"), )?; let bn1 = batch_norm(mid_channels, 1e-5, vb.pp("bn1"))?; let conv_pwl = conv2d_no_bias( mid_channels, out_channels, 1, conv_pwl_cfg, vb.pp("conv_pwl"), )?; let bn2 = batch_norm(out_channels, 1e-5, vb.pp("bn2"))?; Ok(Func::new(move |xs| { let xs = xs .apply(&conv_exp)? .apply_t(&bn1, false)? .apply(&act)? .apply(&conv_pwl)? .apply_t(&bn2, false)?; Ok(xs) })) } fn reshape_kv(t: &Tensor) -> Result<Tensor> { let d = t.dims4()?; let t = t .reshape((d.0, d.1, ()))? .transpose(1, 2)? .unsqueeze(1)? .contiguous()?; Ok(t) } fn reshape_query(t: &Tensor, heads: usize, kv_dim: usize) -> Result<Tensor> { let d = t.dims4()?; let t = t .reshape((d.0, heads, kv_dim, ()))? .transpose(D::Minus1, D::Minus2)? .contiguous()?; Ok(t) } fn reshape_output(t: &Tensor, heads: usize, h: usize, w: usize) -> Result<Tensor> { let d = t.dims4()?; let t = t.transpose(1, 2)?; let t = t .reshape((d.0, h, w, d.3 * heads))? .permute((0, 3, 1, 2))? 
.contiguous()?; Ok(t) } // Mobile multi-query attention #[allow(clippy::too_many_arguments)] fn mqa_block( in_channels: usize, out_channels: usize, heads: usize, kernel: usize, stride: usize, kv_dim: usize, kv_stride: usize, vb: VarBuilder, ) -> Result<Func<'static>> { let down_conv2d_cfg = Conv2dConfig { stride: kv_stride, padding: kernel / 2, groups: in_channels, ..Default::default() }; let proj_conv2d_cfg = Conv2dConfig { stride, ..Default::default() }; let skip_connection = (in_channels == out_channels) && (stride == 1); let gamma = vb.get(out_channels, "layer_scale.gamma"); let norm = batch_norm(out_channels, 1e-5, vb.pp("norm"))?; let scale = (kv_dim as f64).powf(-0.5); let vb = vb.pp("attn"); let query_proj = conv2d_no_bias( out_channels, kv_dim * heads, 1, proj_conv2d_cfg, vb.pp("query.proj"), )?; let key_down_conv = conv2d_no_bias( in_channels, out_channels, kernel, down_conv2d_cfg, vb.pp("key.down_conv"), ); let key_norm = batch_norm(out_channels, 1e-5, vb.pp("key.norm")); let key_proj = conv2d_no_bias(out_channels, kv_dim, 1, proj_conv2d_cfg, vb.pp("key.proj"))?; let value_down_conv = conv2d_no_bias( in_channels, out_channels, kernel, down_conv2d_cfg, vb.pp("value.down_conv"), ); let value_norm = batch_norm(out_channels, 1e-5, vb.pp("value.norm")); let value_proj = conv2d_no_bias( out_channels, kv_dim, 1, proj_conv2d_cfg, vb.pp("value.proj"), )?; let output_proj = conv2d_no_bias( kv_dim * heads, out_channels, 1, proj_conv2d_cfg, vb.pp("output.proj"), )?; Ok(Func::new(move |xs| { let (_, _, h, w) = xs.dims4()?; let residual = xs.clone(); let xs = xs.apply_t(&norm, false)?; // Query let q = xs.apply(&query_proj)?; let q = reshape_query(&q, heads, kv_dim)?; let q = (q * scale)?; // Keys let mut k = xs.clone(); if let (Ok(kd), Ok(n)) = (&key_down_conv, &key_norm) { k = k.apply(kd)?.apply_t(n, false)?; } let k = k.apply(&key_proj)?; let k = reshape_kv(&k)?; // Value let mut v = xs.clone(); if let (Ok(vd), Ok(n)) = (&value_down_conv, &value_norm) { v = v.apply(vd)?; v = v.apply_t(n, false)?; } let v = v.apply(&value_proj)?; let v = reshape_kv(&v)?; let attn = q.broadcast_matmul(&(k.transpose(D::Minus2, D::Minus1)?))?; let attn = softmax(&attn, D::Minus1)?; let o = attn.broadcast_matmul(&v)?; let o = reshape_output(&o, heads, h, w)?; let mut xs = o.apply(&output_proj)?; // Layer scale if let Ok(g) = &gamma { xs = xs.broadcast_mul(&g.reshape((1, (), 1, 1))?)?; }; if skip_connection { xs = (xs + residual)?; } Ok(xs) })) } // Stem. fn mobilenetv4_stem(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> { let conv2d_cfg = Conv2dConfig { stride: 2, padding: 1, ..Default::default() }; let act = cfg.activation; let out_channels = cfg.stem_dim; let bn = batch_norm(out_channels, 1e-5, vb.pp("bn1"))?; let conv = conv2d_no_bias(3, out_channels, 3, conv2d_cfg, vb.pp("conv_stem"))?; Ok(Func::new(move |xs| { let xs = xs.apply(&conv)?.apply_t(&bn, false)?.apply(&act)?; Ok(xs) })) } // The blocks in all the 5 stages of the model. 
fn mobilenetv4_blocks(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> { let mut in_channels = cfg.stem_dim; let mut blocks = Vec::new(); for stage in 0..5 { let nblocks = cfg.stages[stage].len(); for block in 0..nblocks { match cfg.stages[stage][block] { BlockType::Convolutional { out_channels, kernel, stride, } => { blocks.push(conv_block( cfg, in_channels, out_channels, kernel, stride, vb.pp(format!("{stage}.{block}")), )?); in_channels = out_channels; } BlockType::EdgeResidual { out_channels, kernel, stride, expand, } => { blocks.push(edge_residual_block( cfg, in_channels, out_channels, kernel, stride, expand, vb.pp(format!("{stage}.{block}")), )?); in_channels = out_channels; } BlockType::UniversalBottleneck { out_channels, start_kernel, mid_kernel, stride, expand, } => { blocks.push(universal_inverted_bottleneck_block( cfg, in_channels, out_channels, expand, start_kernel, mid_kernel, stride, vb.pp(format!("{stage}.{block}")), )?); in_channels = out_channels; } BlockType::Attention { out_channels, heads, kernel, stride, kv_dim, kv_stride, } => { blocks.push(mqa_block( in_channels, out_channels, heads, kernel, stride, kv_dim, kv_stride, vb.pp(format!("{stage}.{block}")), )?); in_channels = out_channels; } } } } Ok(Func::new(move |xs| { let mut xs = xs.clone(); for block in blocks.iter() { xs = xs.apply(block)? } Ok(xs) })) } // Classification head. fn mobilenetv4_head( cfg: &Config, outputs: usize, nclasses: usize, vb: VarBuilder, ) -> Result<Func<'static>> { let conv2d_cfg = Conv2dConfig { ..Default::default() }; let act = cfg.activation; let conv = conv2d_no_bias(960, outputs, 1, conv2d_cfg, vb.pp("conv_head"))?; let norm = batch_norm(outputs, 1e-5, vb.pp("norm_head"))?; let cls = linear(outputs, nclasses, vb.pp("classifier"))?; Ok(Func::new(move |xs| { let mut xs = xs.clone(); xs = xs.apply(&conv)?; xs = xs.apply_t(&norm, false)?.apply(&act)?; xs = xs.flatten_from(1)?; xs = xs.apply(&cls)?; Ok(xs) })) } // Build a mobilenetv4 model for a given configuration. fn mobilenetv4_model( cfg: &Config, nclasses: Option<usize>, vb: VarBuilder, ) -> Result<Func<'static>> { let cls = match nclasses { None => None, Some(nclasses) => { let outputs = 1280; let head = mobilenetv4_head(cfg, outputs, nclasses, vb.clone())?; Some(head) } }; let stem = mobilenetv4_stem(cfg, vb.clone())?; let blocks = mobilenetv4_blocks(cfg, vb.pp("blocks"))?; Ok(Func::new(move |xs| { let xs = xs.apply(&stem)?.apply(&blocks)?; let xs = xs.mean_keepdim(D::Minus1)?.mean_keepdim(D::Minus2)?; match &cls { None => Ok(xs), Some(cls) => xs.apply(cls), } })) } pub fn mobilenetv4(cfg: &Config, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> { mobilenetv4_model(cfg, Some(nclasses), vb) } pub fn mobilenetv4_no_final_layer(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> { mobilenetv4_model(cfg, None, vb) }
candle/candle-transformers/src/models/mobilenetv4.rs/0
{ "file_path": "candle/candle-transformers/src/models/mobilenetv4.rs", "repo_id": "candle", "token_count": 16874 }
51
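A minimal inference sketch for the MobileNetV4 builder above; the checkpoint path and the 256x256 dummy input are placeholders, while `Config::medium()` and `mobilenetv4()` are the items defined in the file.

use candle::{DType, Device, Result, Tensor};
use candle_nn::{Module, VarBuilder};
use candle_transformers::models::mobilenetv4;

fn main() -> Result<()> {
    let device = Device::Cpu;
    // Assumed path to a converted timm checkpoint.
    let vb = unsafe {
        VarBuilder::from_mmaped_safetensors(&["mobilenetv4_medium.safetensors"], DType::F32, &device)?
    };
    let cfg = mobilenetv4::Config::medium();
    let model = mobilenetv4::mobilenetv4(&cfg, 1000, vb)?;
    let image = Tensor::zeros((1, 3, 256, 256), DType::F32, &device)?;
    let logits = model.forward(&image)?; // shape (1, 1000)
    println!("{:?}", logits.dims());
    Ok(())
}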
use crate::quantized_nn::{linear_b, Embedding, Linear, RmsNorm}; pub use crate::quantized_var_builder::VarBuilder; use crate::models::metavoice::repeat_interleave; use candle::{Module, Result, Tensor, D}; pub mod transformer { use super::*; type Config = crate::models::metavoice::transformer::Config; #[derive(Debug, Clone)] struct FeedForward { w1: Linear, w2: Linear, w3: Linear, span: tracing::Span, } impl FeedForward { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let i_size = cfg.intermediate_size(); let w1 = linear_b(cfg.dim, i_size, false, vb.pp("swiglu.w1"))?; let w2 = linear_b(i_size, cfg.dim, false, vb.pp("w2"))?; let w3 = linear_b(cfg.dim, i_size, false, vb.pp("swiglu.w3"))?; Ok(Self { w1, w2, w3, span: tracing::span!(tracing::Level::TRACE, "feed-forward"), }) } } impl Module for FeedForward { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let swiglu = (candle_nn::ops::silu(&xs.apply(&self.w1)?)? * xs.apply(&self.w3))?; swiglu.apply(&self.w2) } } #[derive(Debug, Clone)] struct Attention { wqkv: Linear, wo: Linear, dim: usize, kv_size: usize, n_local_heads: usize, head_dim: usize, n_head: usize, kv_cache: Option<(Tensor, Tensor)>, span: tracing::Span, } impl Attention { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let n_local_heads = cfg.n_local_heads(); let head_dim = cfg.head_dim(); let total_head_dim = (cfg.n_head + 2 * n_local_heads) * head_dim; let wqkv = linear_b(cfg.dim, total_head_dim, false, vb.pp("wqkv"))?; let wo = linear_b(cfg.dim, cfg.dim, false, vb.pp("wo"))?; Ok(Self { wqkv, wo, dim: cfg.dim, kv_size: n_local_heads * head_dim, n_local_heads, head_dim, n_head: cfg.n_head, kv_cache: None, span: tracing::span!(tracing::Level::TRACE, "attention"), }) } fn forward(&mut self, xs: &Tensor, _pos: usize, mask: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (b_sz, seqlen, _) = xs.dims3()?; let qkv = xs.apply(&self.wqkv)?; let q = qkv.narrow(D::Minus1, 0, self.dim)?; let k = qkv.narrow(D::Minus1, self.dim, self.kv_size)?; let v = qkv.narrow(D::Minus1, self.dim + self.kv_size, self.kv_size)?; let q = q .reshape((b_sz, seqlen, self.n_head, self.head_dim))? .transpose(1, 2)? .contiguous()?; let k = k .reshape((b_sz, seqlen, self.n_local_heads, self.head_dim))? .transpose(1, 2)?; let v = v .reshape((b_sz, seqlen, self.n_local_heads, self.head_dim))? .transpose(1, 2)?; let (k, v) = match &self.kv_cache { None => (k, v), Some((prev_k, prev_v)) => { let k = Tensor::cat(&[prev_k, &k], 2)?; let v = Tensor::cat(&[prev_v, &v], 2)?; (k, v) } }; self.kv_cache = Some((k.clone(), v.clone())); let k = repeat_interleave(&k, self.n_head / self.n_local_heads, 1)?; let v = repeat_interleave(&v, self.n_head / self.n_local_heads, 1)?; let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (q.matmul(&k.transpose(2, 3)?)? * scale)?; let attn_weights = attn_weights.broadcast_add(mask)?; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; let attn_output = attn_weights.matmul(&v)?; attn_output .transpose(1, 2)? .reshape((b_sz, seqlen, self.dim))? 
.apply(&self.wo) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct Block { attention: Attention, feed_forward: FeedForward, ffn_norm: RmsNorm, attention_norm: RmsNorm, span: tracing::Span, } impl Block { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let attention = Attention::new(cfg, vb.pp("attention"))?; let feed_forward = FeedForward::new(cfg, vb.pp("feed_forward"))?; let ffn_norm = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("ffn_norm"))?; let attention_norm = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("attention_norm"))?; Ok(Self { attention, feed_forward, ffn_norm, attention_norm, span: tracing::span!(tracing::Level::TRACE, "block"), }) } fn forward(&mut self, xs: &Tensor, pos: usize, mask: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let hs = xs.apply(&self.attention_norm)?; let hs = (xs + self.attention.forward(&hs, pos, mask))?; &hs + hs.apply(&self.ffn_norm)?.apply(&self.feed_forward) } fn clear_kv_cache(&mut self) { self.attention.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct Model { tok_embeddings: Embedding, pos_embeddings: Embedding, speaker_cond_pos: Linear, layers: Vec<Block>, norm: RmsNorm, output: Linear, spk_cond_mask: Tensor, span: tracing::Span, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let tok_embeddings = Embedding::new(cfg.vocab_size, cfg.dim, vb.pp("tok_embeddings"))?; let pos_embeddings = Embedding::new(cfg.block_size, cfg.dim, vb.pp("pos_embeddings"))?; let speaker_cond_pos = linear_b( cfg.speaker_emb_dim, cfg.dim, false, vb.pp("speaker_cond_pos"), )?; let mut layers = Vec::with_capacity(cfg.n_layer); let vb_l = vb.pp("layers"); for layer_idx in 0..cfg.n_layer { let layer = Block::new(cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("norm"))?; let output = linear_b(cfg.dim, cfg.vocab_size, false, vb.pp("output"))?; let spk_cond_mask = Tensor::cat( &[ Tensor::ones((1, 1, cfg.dim), candle::DType::F32, vb.device())?, Tensor::zeros((1, 1, cfg.dim), candle::DType::F32, vb.device())?, ], 0, )?; Ok(Self { tok_embeddings, pos_embeddings, speaker_cond_pos, layers, norm, output, spk_cond_mask, span: tracing::span!(tracing::Level::TRACE, "qtransformer"), }) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } pub fn forward(&mut self, xs: &Tensor, spk_emb: &Tensor, pos: usize) -> Result<Tensor> { let _enter = self.span.enter(); let (_b_sz, seqlen) = xs.dims2()?; let mask: Vec<_> = (0..seqlen) .flat_map(|i| (0..seqlen).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. })) .collect(); let mask = Tensor::from_slice(&mask, (1, 1, seqlen, seqlen), xs.device())?; let input_pos = Tensor::arange(pos as u32, (pos + seqlen) as u32, xs.device())?; let tok_embeddings = xs.apply(&self.tok_embeddings)?; let pos_embeddings = input_pos.apply(&self.pos_embeddings)?; let mut xs = tok_embeddings .broadcast_add(&pos_embeddings)? .broadcast_add( &spk_emb .apply(&self.speaker_cond_pos)? .broadcast_mul(&self.spk_cond_mask)?, )?; let mask = mask.to_dtype(xs.dtype())?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, pos, &mask)? } xs.narrow(1, seqlen - 1, 1)? .contiguous()? .apply(&self.norm)? .apply(&self.output) } } }
candle/candle-transformers/src/models/quantized_metavoice.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_metavoice.rs", "repo_id": "candle", "token_count": 5050 }
52
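For the quantized metavoice transformer above, generation boils down to repeated calls of `Model::forward` with the current tokens, a speaker embedding, and the running position. A sketch of a single decode step follows; model construction, tokenization, and the speaker embedding are assumed to come from the surrounding metavoice example code.

// One decode step: forward() narrows the sequence dimension before the output
// projection, so the result holds logits for the last position only,
// shape (batch, 1, vocab_size). Call model.clear_kv_cache() before a new sequence.
fn decode_step(
    model: &mut candle_transformers::models::quantized_metavoice::transformer::Model,
    tokens: &candle::Tensor,
    spk_emb: &candle::Tensor,
    pos: usize,
) -> candle::Result<candle::Tensor> {
    model.forward(tokens, spk_emb, pos)
}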
//! RepVGG inference implementation //! //! See "RepVGG: Making VGG-style ConvNets Great Again" Ding et al. 2021 //! https://arxiv.org/abs/2101.03697 use candle::{Result, Tensor, D}; use candle_nn::{ batch_norm, conv2d_no_bias, linear, BatchNorm, Conv2d, Conv2dConfig, Func, VarBuilder, }; const CHANNELS_PER_STAGE: [usize; 5] = [64, 64, 128, 256, 512]; #[derive(Clone)] pub struct Config { a: f32, b: f32, groups: usize, stages: [usize; 4], } impl Config { pub fn a0() -> Self { Self { a: 0.75, b: 2.5, groups: 1, stages: [2, 4, 14, 1], } } pub fn a1() -> Self { Self { a: 1.0, b: 2.5, groups: 1, stages: [2, 4, 14, 1], } } pub fn a2() -> Self { Self { a: 1.5, b: 2.75, groups: 1, stages: [2, 4, 14, 1], } } pub fn b0() -> Self { Self { a: 1.0, b: 2.5, groups: 1, stages: [4, 6, 16, 1], } } pub fn b1() -> Self { Self { a: 2.0, b: 4.0, groups: 1, stages: [4, 6, 16, 1], } } pub fn b2() -> Self { Self { a: 2.5, b: 5.0, groups: 1, stages: [4, 6, 16, 1], } } pub fn b3() -> Self { Self { a: 3.0, b: 5.0, groups: 1, stages: [4, 6, 16, 1], } } pub fn b1g4() -> Self { Self { a: 2.0, b: 4.0, groups: 4, stages: [4, 6, 16, 1], } } pub fn b2g4() -> Self { Self { a: 2.5, b: 5.0, groups: 4, stages: [4, 6, 16, 1], } } pub fn b3g4() -> Self { Self { a: 3.0, b: 5.0, groups: 4, stages: [4, 6, 16, 1], } } } // fuses a convolutional kernel and a batchnorm layer into a convolutional layer // based on the _fuse_bn_tensor method in timm // see https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/byobnet.py#L602 fn fuse_conv_bn(weights: &Tensor, bn: BatchNorm) -> Result<(Tensor, Tensor)> { let (gamma, beta) = bn.weight_and_bias().unwrap(); let mu = bn.running_mean(); let sigma = (bn.running_var() + bn.eps())?.sqrt(); let gps = (gamma / sigma)?; let bias = (beta - mu * &gps)?; let weights = weights.broadcast_mul(&gps.reshape(((), 1, 1, 1))?)?; Ok((weights, bias)) } // A RepVGG layer has a different training time and inference time architecture. // The latter is a simple and efficient equivalent transformation of the former // realized by a structural reparameterization technique, where 3x3 and 1x1 convolutions // along with identity branches and batchnorm layers are fused into a single 3x3 convolution. 
fn repvgg_layer( has_identity: bool, dim: usize, stride: usize, in_channels: usize, out_channels: usize, groups: usize, vb: VarBuilder, ) -> Result<Func<'static>> { let conv2d_cfg = Conv2dConfig { stride, groups, padding: 1, ..Default::default() }; // read and reparameterize the 1x1 conv and bn into w1 and b1 // based on https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/byobnet.py#L543 let conv1x1_bn = batch_norm(dim, 1e-5, vb.pp("conv_1x1.bn"))?; let conv1x1 = conv2d_no_bias( in_channels, out_channels, 1, conv2d_cfg, vb.pp("conv_1x1.conv"), )?; let (mut w1, b1) = fuse_conv_bn(conv1x1.weight(), conv1x1_bn)?; // resize to 3x3 w1 = w1.pad_with_zeros(D::Minus1, 1, 1)?; w1 = w1.pad_with_zeros(D::Minus2, 1, 1)?; // read and reparameterize the 3x3 conv and bn into w3 and b3 let convkxk_bn = batch_norm(dim, 1e-5, vb.pp("conv_kxk.bn"))?; let conv3x3 = conv2d_no_bias( in_channels, out_channels, 3, conv2d_cfg, vb.pp("conv_kxk.conv"), )?; let (w3, b3) = fuse_conv_bn(conv3x3.weight(), convkxk_bn)?; let mut w = (w1 + w3)?; let mut b = (b1 + b3)?; // read and reparameterize the identity bn into wi and bi if has_identity { let identity_bn = batch_norm(dim, 1e-5, vb.pp("identity"))?; // create a 3x3 convolution equivalent to the identity branch let mut weights: Vec<f32> = vec![0.0; conv3x3.weight().elem_count()]; // https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/byobnet.py#L620 let in_dim = in_channels / groups; for i in 0..in_channels { weights[i * in_dim * 3 * 3 + (i % in_dim) * 3 * 3 + 4] = 1.0; } let weights = &Tensor::from_vec(weights, w.shape(), w.device())?; let (wi, bi) = fuse_conv_bn(weights, identity_bn)?; w = (w + wi)?; b = (b + bi)?; } // create the 3x3 conv equivalent to the sum of 3x3, 1x1 and identity branches let reparam_conv = Conv2d::new(w, Some(b), conv2d_cfg); Ok(Func::new(move |xs| { let xs = xs.apply(&reparam_conv)?.relu()?; Ok(xs) })) } // Get the number of output channels per stage taking into account the multipliers fn output_channels_per_stage(a: f32, b: f32, stage: usize) -> usize { let channels = CHANNELS_PER_STAGE[stage] as f32; match stage { 0 => std::cmp::min(64, (channels * a) as usize), 4 => (channels * b) as usize, _ => (channels * a) as usize, } } // Each stage is made of layers. The first layer always downsamples with stride 2. // All but the first layer have a residual connection. // The G4 variants have a groupwise convolution instead of a dense one on odd layers // counted across stage boundaries, so we keep track of which layer we are in the // full model. fn repvgg_stage(cfg: &Config, idx: usize, vb: VarBuilder) -> Result<Func<'static>> { let nlayers = cfg.stages[idx - 1]; let mut layers = Vec::with_capacity(nlayers); let prev_layers: usize = cfg.stages[..idx - 1].iter().sum(); let out_channels_prev = output_channels_per_stage(cfg.a, cfg.b, idx - 1); let out_channels = output_channels_per_stage(cfg.a, cfg.b, idx); for layer_idx in 0..nlayers { let (has_identity, stride, in_channels) = if layer_idx == 0 { (false, 2, out_channels_prev) } else { (true, 1, out_channels) }; let groups = if (prev_layers + layer_idx) % 2 == 1 { cfg.groups } else { 1 }; layers.push(repvgg_layer( has_identity, out_channels, stride, in_channels, out_channels, groups, vb.pp(layer_idx), )?) } Ok(Func::new(move |xs| { let mut xs = xs.clone(); for layer in layers.iter() { xs = xs.apply(layer)? } Ok(xs) })) } // Build a RepVGG model for a given configuration. 
fn repvgg_model(config: &Config, nclasses: Option<usize>, vb: VarBuilder) -> Result<Func<'static>> { let cls = match nclasses { None => None, Some(nclasses) => { let outputs = output_channels_per_stage(config.a, config.b, 4); let linear = linear(outputs, nclasses, vb.pp("head.fc"))?; Some(linear) } }; let stem_dim = output_channels_per_stage(config.a, config.b, 0); let stem = repvgg_layer(false, stem_dim, 2, 3, stem_dim, 1, vb.pp("stem"))?; let vb = vb.pp("stages"); let stage1 = repvgg_stage(config, 1, vb.pp(0))?; let stage2 = repvgg_stage(config, 2, vb.pp(1))?; let stage3 = repvgg_stage(config, 3, vb.pp(2))?; let stage4 = repvgg_stage(config, 4, vb.pp(3))?; Ok(Func::new(move |xs| { let xs = xs .apply(&stem)? .apply(&stage1)? .apply(&stage2)? .apply(&stage3)? .apply(&stage4)? .mean(D::Minus1)? .mean(D::Minus1)?; match &cls { None => Ok(xs), Some(cls) => xs.apply(cls), } })) } pub fn repvgg(cfg: &Config, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> { repvgg_model(cfg, Some(nclasses), vb) } pub fn repvgg_no_final_layer(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> { repvgg_model(cfg, None, vb) }
candle/candle-transformers/src/models/repvgg.rs/0
{ "file_path": "candle/candle-transformers/src/models/repvgg.rs", "repo_id": "candle", "token_count": 4371 }
53
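A minimal sketch for running the reparameterized RepVGG above; `Config::a0()` and `repvgg()` come from the file, and the safetensors path is a placeholder for a converted timm checkpoint.

use candle::{DType, Device, Result, Tensor};
use candle_nn::{Module, VarBuilder};
use candle_transformers::models::repvgg;

fn main() -> Result<()> {
    let device = Device::Cpu;
    // Assumed checkpoint path.
    let vb = unsafe {
        VarBuilder::from_mmaped_safetensors(&["repvgg_a0.safetensors"], DType::F32, &device)?
    };
    let model = repvgg::repvgg(&repvgg::Config::a0(), 1000, vb)?;
    let image = Tensor::zeros((1, 3, 224, 224), DType::F32, &device)?;
    println!("{:?}", model.forward(&image)?.dims()); // [1, 1000]
    Ok(())
}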
use candle::{Result, Tensor, D}; use candle_nn as nn; use candle_nn::Module; #[derive(Debug)] pub struct TimestepEmbedding { linear_1: nn::Linear, linear_2: nn::Linear, } impl TimestepEmbedding { // act_fn: "silu" pub fn new(vs: nn::VarBuilder, channel: usize, time_embed_dim: usize) -> Result<Self> { let linear_1 = nn::linear(channel, time_embed_dim, vs.pp("linear_1"))?; let linear_2 = nn::linear(time_embed_dim, time_embed_dim, vs.pp("linear_2"))?; Ok(Self { linear_1, linear_2 }) } } impl Module for TimestepEmbedding { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = nn::ops::silu(&self.linear_1.forward(xs)?)?; self.linear_2.forward(&xs) } } #[derive(Debug)] pub struct Timesteps { num_channels: usize, flip_sin_to_cos: bool, downscale_freq_shift: f64, } impl Timesteps { pub fn new(num_channels: usize, flip_sin_to_cos: bool, downscale_freq_shift: f64) -> Self { Self { num_channels, flip_sin_to_cos, downscale_freq_shift, } } } impl Module for Timesteps { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let half_dim = (self.num_channels / 2) as u32; let exponent = (Tensor::arange(0, half_dim, xs.device())?.to_dtype(candle::DType::F32)? * -f64::ln(10000.))?; let exponent = (exponent / (half_dim as f64 - self.downscale_freq_shift))?; let emb = exponent.exp()?.to_dtype(xs.dtype())?; // emb = timesteps[:, None].float() * emb[None, :] let emb = xs.unsqueeze(D::Minus1)?.broadcast_mul(&emb.unsqueeze(0)?)?; let (cos, sin) = (emb.cos()?, emb.sin()?); let emb = if self.flip_sin_to_cos { Tensor::cat(&[&cos, &sin], D::Minus1)? } else { Tensor::cat(&[&sin, &cos], D::Minus1)? }; if self.num_channels % 2 == 1 { emb.pad_with_zeros(D::Minus2, 0, 1) } else { Ok(emb) } } }
candle/candle-transformers/src/models/stable_diffusion/embeddings.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/embeddings.rs", "repo_id": "candle", "token_count": 1008 }
54
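A small sketch of the `Timesteps` module above: embedding a batch of three diffusion timesteps into 320 channels (320 is only an illustrative channel count) yields a (3, 320) tensor, half cosine and half sine features since flip_sin_to_cos is set.

use candle::{Device, Result, Tensor};
use candle_nn::Module;
use candle_transformers::models::stable_diffusion::embeddings::Timesteps;

fn main() -> Result<()> {
    let device = Device::Cpu;
    // Three timesteps from a hypothetical 1000-step schedule.
    let timesteps = Tensor::new(&[0f32, 500., 999.], &device)?;
    let emb = Timesteps::new(320, true, 0.).forward(&timesteps)?;
    println!("{:?}", emb.dims()); // [3, 320]
    Ok(())
}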
pub mod audio; pub mod model; pub mod quantized_model; use serde::Deserialize; // The names in comments correspond to the original implementation: // https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L17 #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { pub num_mel_bins: usize, // n_mels pub max_source_positions: usize, // n_audio_ctx pub d_model: usize, // n_audio_state pub encoder_attention_heads: usize, // n_audio_head pub encoder_layers: usize, // n_audio_layer pub vocab_size: usize, // n_vocab pub max_target_positions: usize, // n_text_ctx // pub n_text_state: usize, pub decoder_attention_heads: usize, // n_text_head pub decoder_layers: usize, // n_text_layer #[serde(default)] pub suppress_tokens: Vec<u32>, } pub const DTYPE: candle::DType = candle::DType::F32; // Audio parameters. pub const SAMPLE_RATE: usize = 16000; pub const N_FFT: usize = 400; pub const HOP_LENGTH: usize = 160; pub const CHUNK_LENGTH: usize = 30; pub const N_SAMPLES: usize = CHUNK_LENGTH * SAMPLE_RATE; // 480000 samples in a 30-second chunk pub const N_FRAMES: usize = N_SAMPLES / HOP_LENGTH; // 3000 frames in a mel spectrogram input pub const NO_SPEECH_THRESHOLD: f64 = 0.6; pub const LOGPROB_THRESHOLD: f64 = -1.0; pub const TEMPERATURES: [f64; 6] = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]; pub const COMPRESSION_RATIO_THRESHOLD: f64 = 2.4; // Tokenizer dependent bits. pub const SOT_TOKEN: &str = "<|startoftranscript|>"; pub const TRANSCRIBE_TOKEN: &str = "<|transcribe|>"; pub const TRANSLATE_TOKEN: &str = "<|translate|>"; pub const NO_TIMESTAMPS_TOKEN: &str = "<|notimestamps|>"; pub const EOT_TOKEN: &str = "<|endoftext|>"; pub const NO_SPEECH_TOKENS: [&str; 2] = ["<|nocaptions|>", "<|nospeech|>"];
candle/candle-transformers/src/models/whisper/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/whisper/mod.rs", "repo_id": "candle", "token_count": 812 }
55
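A quick consistency check of the audio constants above: 30 seconds at 16 kHz is 480,000 samples, and with a hop length of 160 that is 3,000 mel frames per chunk.

use candle_transformers::models::whisper::{
    CHUNK_LENGTH, HOP_LENGTH, N_FRAMES, N_SAMPLES, SAMPLE_RATE,
};

fn main() {
    // Both identities hold by construction of the constants.
    assert_eq!(N_SAMPLES, CHUNK_LENGTH * SAMPLE_RATE);
    assert_eq!(N_FRAMES, N_SAMPLES / HOP_LENGTH);
    println!("{} samples -> {} mel frames per chunk", N_SAMPLES, N_FRAMES);
}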
use candle::quantized::QTensor; use candle::{Device, Result, Shape}; use std::sync::Arc; // VarBuilder specialized for QTensors #[derive(Clone)] pub struct VarBuilder { data: Arc<std::collections::HashMap<String, Arc<QTensor>>>, path: Vec<String>, device: Device, } impl VarBuilder { pub fn from_gguf<P: AsRef<std::path::Path>>(p: P, device: &Device) -> Result<Self> { let mut file = std::fs::File::open(p)?; let content = candle::quantized::gguf_file::Content::read(&mut file)?; let mut data = std::collections::HashMap::new(); for tensor_name in content.tensor_infos.keys() { let tensor = content.tensor(&mut file, tensor_name, device)?; data.insert(tensor_name.to_string(), Arc::new(tensor)); } Ok(Self { data: Arc::new(data), path: Vec::new(), device: device.clone(), }) } pub fn from_gguf_buffer(buffer: &[u8], device: &Device) -> Result<Self> { let mut cursor = std::io::Cursor::new(buffer); let content = candle::quantized::gguf_file::Content::read(&mut cursor)?; let mut data = std::collections::HashMap::new(); for tensor_name in content.tensor_infos.keys() { let tensor = content.tensor(&mut cursor, tensor_name, device)?; data.insert(tensor_name.to_string(), Arc::new(tensor)); } Ok(Self { data: Arc::new(data), path: Vec::new(), device: device.clone(), }) } pub fn pp<S: ToString>(&self, s: S) -> Self { let mut path = self.path.clone(); path.push(s.to_string()); Self { data: self.data.clone(), path, device: self.device.clone(), } } fn path(&self, tensor_name: &str) -> String { if self.path.is_empty() { tensor_name.to_string() } else { [&self.path.join("."), tensor_name].join(".") } } pub fn get<S: Into<Shape>>(&self, s: S, name: &str) -> Result<Arc<QTensor>> { let path = self.path(name); match self.data.get(&path) { None => { candle::bail!("cannot find tensor {path}") } Some(qtensor) => { let shape = s.into(); if qtensor.shape() != &shape { candle::bail!( "shape mismatch for {name}, got {:?}, expected {shape:?}", qtensor.shape() ) } Ok(qtensor.clone()) } } } pub fn get_no_shape(&self, name: &str) -> Result<Arc<QTensor>> { let path = self.path(name); match self.data.get(&path) { None => { candle::bail!("cannot find tensor {name}") } Some(qtensor) => Ok(qtensor.clone()), } } pub fn device(&self) -> &Device { &self.device } pub fn contains_key(&self, key: &str) -> bool { self.data.contains_key(key) } }
candle/candle-transformers/src/quantized_var_builder.rs/0
{ "file_path": "candle/candle-transformers/src/quantized_var_builder.rs", "repo_id": "candle", "token_count": 1559 }
56
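A usage sketch for the GGUF-backed VarBuilder above; the file name is a placeholder, and the tensor key is only illustrative of the dotted paths produced by `pp`.

use candle::Device;
use candle_transformers::quantized_var_builder::VarBuilder;

fn main() -> candle::Result<()> {
    let device = Device::Cpu;
    // Placeholder path to any GGUF checkpoint.
    let vb = VarBuilder::from_gguf("model-q4k.gguf", &device)?;
    // Sub-scopes mirror the dotted tensor names stored in the file.
    let vb_layer0 = vb.pp("model.layers.0");
    // contains_key takes the full tensor name, independent of the current scope.
    println!(
        "has q_proj: {}",
        vb.contains_key("model.layers.0.self_attn.q_proj.weight")
    );
    let _ = vb_layer0;
    Ok(())
}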
<!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } </style> <title>Candle Blip Image Captioning Demo</title> <script src="https://cdn.tailwindcss.com"></script> <script type="module" src="./code.js"></script> <script type="module"> const MODELS = { blip_image_quantized_q4k: { base_url: "https://huggingface.co/lmz/candle-blip/resolve/main/", model: "blip-image-captioning-large-q4k.gguf", config: "config.json", tokenizer: "tokenizer.json", quantized: true, size: "271 MB", }, blip_image_quantized_q80: { base_url: "https://huggingface.co/lmz/candle-blip/resolve/main/", model: "blip-image-captioning-large-q80.gguf", config: "config.json", tokenizer: "tokenizer.json", quantized: true, size: "505 MB", }, blip_image_large: { base_url: "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/refs%2Fpr%2F18/", model: "model.safetensors", config: "config.json", tokenizer: "tokenizer.json", quantized: false, size: "1.88 GB", }, }; const blipWorker = new Worker("./blipWorker.js", { type: "module", }); const outputStatusEl = document.querySelector("#output-status"); const outputCaptionEl = document.querySelector("#output-caption"); const modelSelectEl = document.querySelector("#model"); const clearBtn = document.querySelector("#clear-btn"); const fileUpload = document.querySelector("#file-upload"); const dropArea = document.querySelector("#drop-area"); const imagesExamples = document.querySelector("#image-select"); const canvas = document.querySelector("#canvas"); const ctxCanvas = canvas.getContext("2d"); let isCaptioning = false; let currentImageURL = null; clearBtn.addEventListener("click", () => { clearImageCanvas(); }); modelSelectEl.addEventListener("change", () => { if (currentImageURL) { runInference(currentImageURL); } }); //add event listener to file input fileUpload.addEventListener("input", async (e) => { const target = e.target; if (target.files.length > 0) { const href = URL.createObjectURL(target.files[0]); clearImageCanvas(); await drawImageCanvas(href); runInference(href); } }); // add event listener to drop-area dropArea.addEventListener("dragenter", (e) => { e.preventDefault(); dropArea.classList.add("border-blue-700"); }); dropArea.addEventListener("dragleave", (e) => { e.preventDefault(); dropArea.classList.remove("border-blue-700"); }); dropArea.addEventListener("dragover", (e) => { e.preventDefault(); }); dropArea.addEventListener("drop", async (e) => { e.preventDefault(); dropArea.classList.remove("border-blue-700"); const url = e.dataTransfer.getData("text/uri-list"); const files = e.dataTransfer.files; if (files.length > 0) { const href = URL.createObjectURL(files[0]); clearImageCanvas(); await drawImageCanvas(href); runInference(href); } else if (url) { clearImageCanvas(); await drawImageCanvas(url); runInference(url); } }); imagesExamples.addEventListener("click", async (e) => { if (isCaptioning) { return; } const target = e.target; if (target.nodeName === "IMG") { const href = target.src; clearImageCanvas(); await drawImageCanvas(href); runInference(href); } }); function clearImageCanvas() { ctxCanvas.clearRect(0, 0, canvas.width, canvas.height); isCaptioning = false; clearBtn.disabled = true; canvas.parentElement.style.height = "auto"; outputStatusEl.hidden = 
false; outputCaptionEl.hidden = true; outputStatusEl.innerText = "Please select an image"; currentImageURL = null; } async function drawImageCanvas(imgURL) { if (!imgURL) { throw new Error("No image URL provided"); } return new Promise((resolve, reject) => { ctxCanvas.clearRect(0, 0, canvas.width, canvas.height); const img = new Image(); img.crossOrigin = "anonymous"; img.onload = () => { canvas.width = img.width; canvas.height = img.height; ctxCanvas.drawImage(img, 0, 0); canvas.parentElement.style.height = canvas.offsetHeight + "px"; clearBtn.disabled = false; resolve(img); }; img.onerror = () => reject(new Error("Failed to load image")); img.src = imgURL; currentImageURL = imgURL; }); } document.addEventListener("DOMContentLoaded", () => { for (const [id, model] of Object.entries(MODELS)) { const option = document.createElement("option"); option.value = id; option.innerText = `${id} (${model.size})`; modelSelectEl.appendChild(option); } }); async function getImageCaption( worker, weightsURL, tokenizerURL, configURL, modelID, imageURL, quantized, updateStatus = null ) { return new Promise((resolve, reject) => { worker.postMessage({ weightsURL, tokenizerURL, configURL, modelID, imageURL, quantized, }); function messageHandler(event) { if ("error" in event.data) { worker.removeEventListener("message", messageHandler); reject(new Error(event.data.error)); } if (event.data.status === "complete") { worker.removeEventListener("message", messageHandler); resolve(event.data); } if (updateStatus) updateStatus(event.data); } worker.addEventListener("message", messageHandler); }); } function updateStatus(data) { if (data.status === "status") { outputStatusEl.innerText = data.message; } } async function runInference(imageURL) { if (isCaptioning || !imageURL) { alert("Please select an image first"); return; } outputStatusEl.hidden = false; outputCaptionEl.hidden = true; clearBtn.disabled = true; modelSelectEl.disabled = true; isCaptioning = true; const selectedModel = modelSelectEl.value; const model = MODELS[selectedModel]; const weightsURL = `${model.base_url}${model.model}`; const tokenizerURL = `${model.base_url}${model.tokenizer}`; const configURL = `${model.base_url}${model.config}`; const quantized = model.quantized; try { const time = performance.now(); const caption = await getImageCaption( blipWorker, weightsURL, tokenizerURL, configURL, selectedModel, imageURL, quantized, updateStatus ); outputStatusEl.hidden = true; outputCaptionEl.hidden = false; const totalTime = ((performance.now() - time) / 1000).toFixed(2); outputCaptionEl.innerHTML = `${caption.output}<br/><span class="text-xs">Inference time: ${totalTime} s</span>`; } catch (err) { console.error(err); outputStatusEl.hidden = false; outputCaptionEl.hidden = true; outputStatusEl.innerText = err.message; } clearBtn.disabled = false; modelSelectEl.disabled = false; isCaptioning = false; } </script> </head> <body class="container max-w-4xl mx-auto p-4"> <main class="grid grid-cols-1 gap-5 relative"> <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span> <div> <h1 class="text-5xl font-bold">Candle BLIP Image Captioning</h1> <h2 class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> <a href="https://huggingface.co/Salesforce/blip-image-captioning-large" target="_blank" class="underline hover:text-blue-500 hover:no-underline" >BLIP Image Captioning </a> running in the browser using <a href="https://github.com/huggingface/candle/" target="_blank" class="underline hover:text-blue-500 hover:no-underline" >Candle</a >, a minimalist
ML framework for Rust. </p> <p class="text-xs max-w-lg py-2"> <b>Note:</b> Image captioning with the smallest model takes roughly 50 seconds; this will vary depending on your machine and the model size. </p> </div> <div> <label for="model" class="font-medium block">Model Options: </label> <select id="model" class="border-2 border-gray-500 rounded-md font-light interactive disabled:cursor-not-allowed w-full max-w-max" ></select> </div> <!-- drag and drop area --> <div class="grid gap-4 sm:grid-cols-2 py-4"> <div class="relative max-w-lg"> <div class="absolute w-full bottom-full flex justify-between items-center" > <div class="flex gap-2 w-full"> <button id="clear-btn" disabled title="Clear Image" class="ml-auto text-xs bg-white rounded-md disabled:opacity-50 flex gap-1 items-center" > <svg class="" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 13 12" height="1em" > <path d="M1.6.7 12 11.1M12 .7 1.6 11.1" stroke="#2E3036" stroke-width="2" /> </svg> </button> </div> </div> <div id="drop-area" class="flex flex-col items-center justify-center border-2 border-gray-300 border-dashed rounded-xl relative aspect-video w-full overflow-hidden" > <div class="flex flex-col items-center justify-center space-y-1 text-center" > <svg width="25" height="25" viewBox="0 0 25 25" fill="none" xmlns="http://www.w3.org/2000/svg" > <path d="M3.5 24.3a3 3 0 0 1-1.9-.8c-.5-.5-.8-1.2-.8-1.9V2.9c0-.7.3-1.3.8-1.9.6-.5 1.2-.7 2-.7h18.6c.7 0 1.3.2 1.9.7.5.6.7 1.2.7 2v18.6c0 .7-.2 1.4-.7 1.9a3 3 0 0 1-2 .8H3.6Zm0-2.7h18.7V2.9H3.5v18.7Zm2.7-2.7h13.3c.3 0 .5 0 .6-.3v-.7l-3.7-5a.6.6 0 0 0-.6-.2c-.2 0-.4 0-.5.3l-3.5 4.6-2.4-3.3a.6.6 0 0 0-.6-.3c-.2 0-.4.1-.5.3l-2.7 3.6c-.1.2-.2.4 0 .7.1.2.3.3.6.3Z" fill="#000" /> </svg> <div class="flex text-sm text-gray-600"> <label for="file-upload" class="relative cursor-pointer bg-white rounded-md font-medium text-blue-950 hover:text-blue-700" > <span>Drag and drop your image here</span> <span class="block text-xs">or</span> <span class="block text-xs">Click to upload</span> </label> </div> <input id="file-upload" name="file-upload" type="file" class="sr-only" /> </div> <canvas id="canvas" class="absolute pointer-events-none w-full" ></canvas> </div> </div> <div class=""> <div class="h-full bg-slate-100 text-gray-500 p-4 rounded-md flex flex-col gap-2" > <p id="output-caption" class="m-auto text-xl text-center p-2" hidden ></p> <span id="output-status" class="m-auto font-light"> Please select an image </span> </div> </div> </div> <div> <div class="flex gap-3 items-center overflow-x-scroll" id="image-select" > <h3 class="font-medium">Examples:</h3> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/sf.jpg" class="cursor-pointer w-24 h-24 object-cover" /> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/bike.jpeg" class="cursor-pointer w-24 h-24 object-cover" /> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/000000000077.jpg" class="cursor-pointer w-24 h-24 object-cover" /> </div> </div> </main> </body> </html>
candle/candle-wasm-examples/blip/index.html/0
{ "file_path": "candle/candle-wasm-examples/blip/index.html", "repo_id": "candle", "token_count": 7164 }
57
use crate::model::{Cache, Config, Llama}; use byteorder::{LittleEndian, ReadBytesExt}; use candle::{DType, Device, IndexOp, Result, Shape, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use serde::{Deserialize, Serialize}; use tokenizers::Tokenizer; use wasm_bindgen::prelude::*; use yew_agent::{HandlerId, Public, WorkerLink}; #[wasm_bindgen] extern "C" { // Use `js_namespace` here to bind `console.log(..)` instead of just // `log(..)` #[wasm_bindgen(js_namespace = console)] pub fn log(s: &str); } #[macro_export] macro_rules! console_log { // Note that this is using the `log` function imported above during // `bare_bones` ($($t:tt)*) => ($crate::worker::log(&format_args!($($t)*).to_string())) } // Communication to the worker happens through bincode, the model weights and configs are fetched // on the main thread and transferred via the following structure. #[derive(Serialize, Deserialize)] pub struct ModelData { pub tokenizer: Vec<u8>, pub model: Vec<u8>, } fn read_i32<R: std::io::Read>(r: &mut R) -> Result<i32> { let mut buf = [0u8; 4]; r.read_exact(&mut buf)?; Ok(i32::from_le_bytes(buf)) } fn read_tensor<R: std::io::Read, S: Into<Shape>>( r: &mut R, shape: S, dev: &Device, ) -> Result<Tensor> { let shape = shape.into(); let mut data_t = vec![0f32; shape.elem_count()]; r.read_f32_into::<LittleEndian>(&mut data_t)?; let tensor = Tensor::from_vec(data_t, shape, dev)?; Ok(tensor) } pub struct Model { pub cache: Cache, pub config: Config, pub llama: Llama, pub tokenizer: Tokenizer, } impl Model { fn run( &self, link: &WorkerLink<Worker>, id: HandlerId, temp: f64, top_p: f64, prompt: String, ) -> Result<()> { let dev = Device::Cpu; let temp = if temp <= 0. { None } else { Some(temp) }; let top_p = if top_p <= 0. || top_p >= 1.0 { None } else { Some(top_p) }; console_log!("temp: {temp:?} top_p: {top_p:?} prompt: {prompt}"); let mut logits_processor = LogitsProcessor::new(299792458, temp, top_p); let mut index_pos = 0; let mut tokens = self .tokenizer .encode(prompt.to_string(), true) .map_err(|m| candle::Error::Msg(m.to_string()))? .get_ids() .to_vec(); link.respond(id, Ok(WorkerOutput::Generated(prompt))); for index in 0.. { if tokens.len() >= self.config.seq_len { break; } let context_size = if self.cache.use_kv_cache && index > 0 { 1 } else { tokens.len() }; let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &dev)?.unsqueeze(0)?; let logits = self.llama.forward(&input, index_pos)?; let logits = logits.squeeze(0)?; index_pos += ctxt.len(); let next_token = logits_processor.sample(&logits)?; tokens.push(next_token); if let Some(text) = self.tokenizer.id_to_token(next_token) { let text = text.replace('โ–', " ").replace("<0x0A>", "\n"); link.respond(id, Ok(WorkerOutput::Generated(text))); } } Ok(()) } } impl Config { fn from_reader<R: std::io::Read>(r: &mut R) -> Result<Self> { let dim = read_i32(r)? as usize; let hidden_dim = read_i32(r)? as usize; let n_layers = read_i32(r)? as usize; let n_heads = read_i32(r)? as usize; let n_kv_heads = read_i32(r)? as usize; let vocab_size = read_i32(r)? as usize; let seq_len = read_i32(r)? 
as usize; Ok(Self { dim, hidden_dim, n_layers, n_heads, n_kv_heads, vocab_size, seq_len, norm_eps: 1e-5, }) } pub fn head_size(&self) -> usize { self.dim / self.n_heads } } struct TransformerWeights { // token embedding table token_embedding_table: Tensor, // (vocab_size, dim) // weights for rmsnorms rms_att_weight: Tensor, // (layer, dim) rmsnorm weights rms_ffn_weight: Tensor, // (layer, dim) // weights for matmuls wq: Tensor, // (layer, dim, dim) wk: Tensor, // (layer, dim, dim) wv: Tensor, // (layer, dim, dim) wo: Tensor, // (layer, dim, dim) // weights for ffn w1: Tensor, // (layer, hidden_dim, dim) w2: Tensor, // (layer, dim, hidden_dim) w3: Tensor, // (layer, hidden_dim, dim) // final rmsnorm rms_final_weight: Tensor, // (dim,) // freq_cis for RoPE relatively positional embeddings freq_cis_real: Tensor, // (seq_len, head_size/2) freq_cis_imag: Tensor, // (seq_len, head_size/2) } impl TransformerWeights { fn from_reader<R: std::io::Read>(r: &mut R, c: &Config, dev: &Device) -> Result<Self> { let token_embedding_table = read_tensor(r, (c.vocab_size, c.dim), dev)?; let rms_att_weight = read_tensor(r, (c.n_layers, c.dim), dev)?; let wq = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let wk = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let wv = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let wo = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let rms_ffn_weight = read_tensor(r, (c.n_layers, c.dim), dev)?; let w1 = read_tensor(r, (c.n_layers, c.hidden_dim, c.dim), dev)?; let w2 = read_tensor(r, (c.n_layers, c.dim, c.hidden_dim), dev)?; let w3 = read_tensor(r, (c.n_layers, c.hidden_dim, c.dim), dev)?; let rms_final_weight = read_tensor(r, c.dim, dev)?; let head_size = c.head_size(); let freq_cis_real = read_tensor(r, (c.seq_len, head_size / 2), dev)?; let freq_cis_imag = read_tensor(r, (c.seq_len, head_size / 2), dev)?; Ok(Self { token_embedding_table, rms_att_weight, wq, wk, wv, wo, rms_ffn_weight, w1, w2, w3, rms_final_weight, freq_cis_real, freq_cis_imag, }) } fn var_builder(&self, cfg: &Config, device: &Device) -> Result<VarBuilder> { let mut ws = std::collections::HashMap::new(); let mut insert = |name: &str, t: Tensor| { ws.insert(name.to_string(), t); }; insert("rot.freq_cis_real", self.freq_cis_real.clone()); insert("rot.freq_cis_imag", self.freq_cis_imag.clone()); insert( "model.embed_tokens.weight", self.token_embedding_table.clone(), ); insert("lm_head.weight", self.token_embedding_table.clone()); insert("model.norm.weight", self.rms_final_weight.clone()); for layer in 0..cfg.n_layers { ws.insert( format!("model.layers.{layer}.self_attn.q_proj.weight"), self.wq.i(layer)?, ); ws.insert( format!("model.layers.{layer}.self_attn.k_proj.weight"), self.wk.i(layer)?, ); ws.insert( format!("model.layers.{layer}.self_attn.v_proj.weight"), self.wv.i(layer)?, ); ws.insert( format!("model.layers.{layer}.self_attn.o_proj.weight"), self.wo.i(layer)?, ); ws.insert( format!("model.layers.{layer}.mlp.gate_proj.weight"), self.w1.i(layer)?, ); ws.insert( format!("model.layers.{layer}.mlp.down_proj.weight"), self.w2.i(layer)?, ); ws.insert( format!("model.layers.{layer}.mlp.up_proj.weight"), self.w3.i(layer)?, ); ws.insert( format!("model.layers.{layer}.input_layernorm.weight"), self.rms_att_weight.i(layer)?, ); ws.insert( format!("model.layers.{layer}.post_attention_layernorm.weight"), self.rms_ffn_weight.i(layer)?, ); } let vb = VarBuilder::from_tensors(ws, DType::F32, device); Ok(vb) } } impl Model { pub fn load(md: ModelData) -> Result<Self> { let dev = Device::Cpu; let mut 
model = std::io::Cursor::new(md.model); let config = Config::from_reader(&mut model)?; let weights = TransformerWeights::from_reader(&mut model, &config, &dev)?; let vb = weights.var_builder(&config, &dev)?; let cache = Cache::new(true, &config, vb.pp("rot"))?; let llama = Llama::load(vb, &cache, &config)?; let tokenizer = Tokenizer::from_bytes(&md.tokenizer).map_err(|m| candle::Error::Msg(m.to_string()))?; Ok(Self { cache, config, llama, tokenizer, }) } } pub struct Worker { link: WorkerLink<Self>, model: Option<Model>, } #[derive(Serialize, Deserialize)] pub enum WorkerInput { ModelData(ModelData), Run(f64, f64, String), } #[derive(Serialize, Deserialize)] pub enum WorkerOutput { Generated(String), GenerationDone(std::result::Result<(), String>), WeightsLoaded, } impl yew_agent::Worker for Worker { type Input = WorkerInput; type Message = (); type Output = std::result::Result<WorkerOutput, String>; type Reach = Public<Self>; fn create(link: WorkerLink<Self>) -> Self { Self { link, model: None } } fn update(&mut self, _msg: Self::Message) { // no messaging } fn handle_input(&mut self, msg: Self::Input, id: HandlerId) { let output = match msg { WorkerInput::ModelData(md) => match Model::load(md) { Ok(model) => { self.model = Some(model); Ok(WorkerOutput::WeightsLoaded) } Err(err) => Err(format!("model creation error {err:?}")), }, WorkerInput::Run(temp, top_p, prompt) => match &mut self.model { None => Err("model has not been set yet".to_string()), Some(model) => { { let mut cache = model.cache.kvs.lock().unwrap(); for elem in cache.iter_mut() { *elem = None } } let result = model .run(&self.link, id, temp, top_p, prompt) .map_err(|e| e.to_string()); Ok(WorkerOutput::GenerationDone(result)) } }, }; self.link.respond(id, output); } fn name_of_resource() -> &'static str { "worker.js" } fn resource_path_is_relative() -> bool { true } }
candle/candle-wasm-examples/llama2-c/src/worker.rs/0
{ "file_path": "candle/candle-wasm-examples/llama2-c/src/worker.rs", "repo_id": "candle", "token_count": 5770 }
58
[package] name = "candle-wasm-example-sam" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true [dependencies] candle = { workspace = true } candle-nn = { workspace = true } candle-transformers = { workspace = true } num-traits = { workspace = true } # App crates. anyhow = { workspace = true } byteorder = { workspace = true } getrandom = { version = "0.2", features = ["js"] } image = { workspace = true } log = { workspace = true } safetensors = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } # Wasm specific crates. console_error_panic_hook = "0.1.7" wasm-bindgen = "0.2.87" serde-wasm-bindgen = "0.6.0"
candle/candle-wasm-examples/segment-anything/Cargo.toml/0
{ "file_path": "candle/candle-wasm-examples/segment-anything/Cargo.toml", "repo_id": "candle", "token_count": 264 }
59
export async function extractEmbeddings( worker, weightsURL, tokenizerURL, configURL, modelID, sentences, updateStatus, normalize_embeddings = true ) { return new Promise((resolve, reject) => { worker.postMessage({ weightsURL, tokenizerURL, configURL, modelID, sentences, normalize_embeddings, }); function messageHandler(event) { if ("error" in event.data) { worker.removeEventListener("message", messageHandler); reject(new Error(event.data.error)); } if (event.data.status === "complete") { worker.removeEventListener("message", messageHandler); resolve(event.data); } if (updateStatus) updateStatus(event.data); } worker.addEventListener("message", messageHandler); }); } export async function generateText( worker, weightsURL, tokenizerURL, configURL, modelID, prompt, params, updateStatus ) { return new Promise((resolve, reject) => { worker.postMessage({ weightsURL, tokenizerURL, configURL, modelID, prompt, params, }); function messageHandler(event) { if ("error" in event.data) { worker.removeEventListener("message", messageHandler); reject(new Error(event.data.error)); } if (event.data.status === "complete") { worker.removeEventListener("message", messageHandler); resolve(event.data); } if (updateStatus) updateStatus(event.data); } worker.addEventListener("message", messageHandler); }); } export const MODELS = { t5_small_quantized: { size: "64.4 MB", base_url: "https://huggingface.co/lmz/candle-quantized-t5/resolve/main/", model: "model.gguf", tokenizer: "tokenizer.json", config: "config.json", tasks: { translation_en_to_de: { prefix: "translate English to German: ", max_length: 300, }, translation_en_to_fr: { prefix: "translate English to French: ", max_length: 300, }, translation_en_to_ro: { prefix: "translate English to Romanian: ", max_length: 300, }, summarization: { prefix: "summarize: ", max_length: 200 }, }, }, t5_small: { size: "242 MB", base_url: "https://huggingface.co/t5-small/resolve/main/", model: "model.safetensors", tokenizer: "tokenizer.json", config: "config.json", tasks: { translation_en_to_de: { prefix: "translate English to German: ", max_length: 300, }, translation_en_to_fr: { prefix: "translate English to French: ", max_length: 300, }, translation_en_to_ro: { prefix: "translate English to Romanian: ", max_length: 300, }, summarization: { prefix: "summarize: ", max_length: 200 }, }, }, flan_t5_small: { size: "308 MB", base_url: "https://huggingface.co/google/flan-t5-small/resolve/refs%2Fpr%2F14/", model: "model.safetensors", tokenizer: "tokenizer.json", config: "config.json", tasks: { translation_en_to_de: { prefix: "translate English to German: ", max_length: 300, }, translation_en_to_fr: { prefix: "translate English to French: ", max_length: 300, }, translation_en_to_ro: { prefix: "translate English to Romanian: ", max_length: 300, }, summarization: { prefix: "summarize: ", max_length: 200 }, }, }, flan_t5_base_quantized: { size: "263 MB", base_url: "https://huggingface.co/lmz/candle-quantized-t5/resolve/main/", model: "model-flan-t5-base.gguf", tokenizer: "tokenizer.json", config: "config-flan-t5-base.json", tasks: { translation_en_to_de: { prefix: "translate English to German: ", max_length: 300, }, translation_en_to_fr: { prefix: "translate English to French: ", max_length: 300, }, translation_en_to_ro: { prefix: "translate English to Romanian: ", max_length: 300, }, summarization: { prefix: "summarize: ", max_length: 200 }, }, }, coedit_large_quantized: { size: "643 MB", base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/", model: 
"model.gguf", tokenizer: "tokenizer.json", config: "config.json", tasks: { fluency: { prefix: "Fix the grammar: ", max_length: 300, }, coherence: { prefix: "Rewrite to make this easier to understand: ", max_length: 300, }, simplification: { prefix: "translate English to Romanian: ", max_length: 300, }, simplification: { prefix: "Paraphrase this: ", max_length: 300, }, formalization: { prefix: "Write this more formally: ", max_length: 300, }, neutralize: { prefix: "Write in a more neutral way: ", max_length: 300, }, }, }, }; export function getModelInfo(id, taskID) { const model = MODELS[id]; return { modelURL: model.base_url + model.model, configURL: model.base_url + model.config, tokenizerURL: model.base_url + model.tokenizer, maxLength: model.tasks[taskID].max_length, }; }
candle/candle-wasm-examples/t5/utils.js/0
{ "file_path": "candle/candle-wasm-examples/t5/utils.js", "repo_id": "candle", "token_count": 2339 }
60
[package] name = "candle-wasm-example-yolo" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true [dependencies] candle = { workspace = true } candle-nn = { workspace = true } num-traits = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } image = { workspace = true } # App crates. anyhow = { workspace = true } byteorder = { workspace = true } log = { workspace = true } rand = { workspace = true } safetensors = { workspace = true } # Wasm specific crates. console_error_panic_hook = "0.1.7" getrandom = { version = "0.2", features = ["js"] } gloo = "0.11" js-sys = "0.3.64" wasm-bindgen = "0.2.87" wasm-bindgen-futures = "0.4.37" wasm-logger = "0.2" yew-agent = "0.2.0" yew = { version = "0.20.0", features = ["csr"] } [dependencies.web-sys] version = "0.3.70" features = [ 'Blob', 'CanvasRenderingContext2d', 'Document', 'Element', 'HtmlElement', 'HtmlCanvasElement', 'HtmlImageElement', 'ImageData', 'Node', 'Window', 'Request', 'RequestCache', 'RequestInit', 'RequestMode', 'Response', 'Performance', 'TextMetrics', ]
candle/candle-wasm-examples/yolo/Cargo.toml/0
{ "file_path": "candle/candle-wasm-examples/yolo/Cargo.toml", "repo_id": "candle", "token_count": 463 }
61
pub fn add(left: usize, right: usize) -> usize {
    left + right
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn it_works() {
        let result = add(2, 2);
        assert_eq!(result, 4);
    }
}
candle/candle-wasm-tests/src/lib.rs/0
{ "file_path": "candle/candle-wasm-tests/src/lib.rs", "repo_id": "candle", "token_count": 108 }
62
apiVersion: v1
kind: ConfigMap
metadata:
  labels: {{ include "labels.standard" . | nindent 4 }}
  name: {{ include "name" . }}
  namespace: {{ .Release.Namespace }}
data:
{{- range $key, $value := $.Values.envVars }}
  {{ $key }}: {{ $value | quote }}
{{- end }}
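# Illustrative sketch (not part of the chart): with hypothetical values such as
#   envVars:
#     PUBLIC_APP_NAME: "ChatUI"
#     MONGODB_URL: "mongodb://mongo:27017"
# the range above renders one quoted entry per key/value pair under `data`, e.g.
#   data:
#     PUBLIC_APP_NAME: "ChatUI"
#     MONGODB_URL: "mongodb://mongo:27017"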
chat-ui/chart/templates/config.yaml/0
{ "file_path": "chat-ui/chart/templates/config.yaml", "repo_id": "chat-ui", "token_count": 96 }
63
# Amazon Web Services (AWS)

| Feature                     | Available |
| --------------------------- | --------- |
| [Tools](../tools)           | No        |
| [Multimodal](../multimodal) | No        |

You may specify your Amazon SageMaker instance as an endpoint for Chat UI:

```ini
MODELS=`[{
  "name": "your-model",
  "displayName": "Your Model",
  "description": "Your description",
  "parameters": {
    "max_new_tokens": 4096
  },
  "endpoints": [
    {
      "type": "aws",
      "service": "sagemaker",
      "url": "",
      "accessKey": "",
      "secretKey": "",
      "sessionToken": "",
      "region": "",
      "weight": 1
    }
  ]
}]`
```

You can get the `accessKey` and `secretKey` from your AWS user, under programmatic access.

You can also set `"service": "lambda"` to use a Lambda instance.
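As a sketch only (the field names simply mirror the SageMaker example above and the empty values are placeholders, not verified settings), a Lambda-backed endpoint entry would differ only in the `service` field:

```ini
"endpoints": [
  {
    "type": "aws",
    "service": "lambda",
    "url": "",
    "accessKey": "",
    "secretKey": "",
    "sessionToken": "",
    "region": "",
    "weight": 1
  }
]
```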
chat-ui/docs/source/configuration/models/providers/aws.md/0
{ "file_path": "chat-ui/docs/source/configuration/models/providers/aws.md", "repo_id": "chat-ui", "token_count": 348 }
64
# 🤗 Chat UI

Open source chat interface with support for tools, web search, multimodal and many API providers. The app uses MongoDB and SvelteKit behind the scenes. Try the live version of the app called [HuggingChat on hf.co/chat](https://huggingface.co/chat) or [set up your own instance](./installation/spaces).

🔧 **[Tools](./configuration/models/tools)**: Function calling with custom tools and support for [Zero GPU spaces](https://huggingface.co/spaces/enzostvs/zero-gpu-spaces)

🔍 **[Web Search](./configuration/web-search)**: Automated web search, scraping and RAG for all models

🐙 **[Multimodal](./configuration/models/multimodal)**: Accepts image file uploads on supported providers

👤 **[OpenID](./configuration/open-id)**: Optionally set up OpenID for user authentication

<div class="flex gap-x-4">
  <div>
    Tools
    <div class="flex justify-center">
      <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/tools-light.png" height="auto"/>
      <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/tools-dark.png" height="auto"/>
    </div>
  </div>
  <div>
    Web Search
    <div class="flex justify-center">
      <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/websearch-light.png" height="auto"/>
      <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/websearch-dark.png" height="auto"/>
    </div>
  </div>
</div>

## Quickstart

You can quickly have a locally running chat-ui & LLM text-generation server thanks to chat-ui's [llama.cpp server support](https://huggingface.co/docs/chat-ui/configuration/models/providers/llamacpp).

**Step 1 (Start llama.cpp server):**

```bash
# install llama.cpp
brew install llama.cpp

# start llama.cpp server (using hf.co/microsoft/Phi-3-mini-4k-instruct-gguf as an example)
llama-server --hf-repo microsoft/Phi-3-mini-4k-instruct-gguf --hf-file Phi-3-mini-4k-instruct-q4.gguf -c 4096
```

A local LLaMA.cpp HTTP Server will start on `http://localhost:8080`. Read more [here](https://huggingface.co/docs/chat-ui/configuration/models/providers/llamacpp).

**Step 2 (tell chat-ui to use local llama.cpp server):**

Add the following to your `.env.local`:

```ini
MODELS=`[
  {
    "name": "Local microsoft/Phi-3-mini-4k-instruct-gguf",
    "tokenizer": "microsoft/Phi-3-mini-4k-instruct-gguf",
    "preprompt": "",
    "chatPromptTemplate": "<s>{{preprompt}}{{#each messages}}{{#ifUser}}<|user|>\n{{content}}<|end|>\n<|assistant|>\n{{/ifUser}}{{#ifAssistant}}{{content}}<|end|>\n{{/ifAssistant}}{{/each}}",
    "parameters": {
      "stop": ["<|end|>", "<|endoftext|>", "<|assistant|>"],
      "temperature": 0.7,
      "max_new_tokens": 1024,
      "truncate": 3071
    },
    "endpoints": [{
      "type": "llamacpp",
      "baseURL": "http://localhost:8080"
    }],
  },
]`
```

Read more [here](https://huggingface.co/docs/chat-ui/configuration/models/providers/llamacpp).

**Step 3 (make sure you have MongoDB running locally):**

```bash
docker run -d -p 27017:27017 --name mongo-chatui mongo:latest
```

Read more [here](https://github.com/huggingface/chat-ui?tab=readme-ov-file#database).

**Step 4 (start chat-ui):**

```bash
git clone https://github.com/huggingface/chat-ui
cd chat-ui
npm install
npm run dev -- --open
```

Read more [here](https://github.com/huggingface/chat-ui?tab=readme-ov-file#launch).

<div class="flex justify-center">
  <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/llamacpp-light.png" height="auto"/>
  <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/llamacpp-dark.png" height="auto"/>
</div>
chat-ui/docs/source/index.md/0
{ "file_path": "chat-ui/docs/source/index.md", "repo_id": "chat-ui", "token_count": 1435 }
65