text
stringlengths
5
631k
id
stringlengths
14
178
metadata
dict
__index_level_0__
int64
0
647
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch SAM-HQ model.""" import tempfile import unittest import pytest import requests from transformers import ( SamHQConfig, SamHQMaskDecoderConfig, SamHQPromptEncoderConfig, SamHQVisionConfig, SamHQVisionModel, pipeline, ) from transformers.testing_utils import Expectations, cleanup, require_torch, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SamHQModel, SamHQProcessor if is_vision_available(): from PIL import Image class SamHQVisionModelTester: def __init__( self, parent, hidden_size=36, intermediate_size=72, projection_dim=62, output_channels=32, num_hidden_layers=2, num_attention_heads=4, num_channels=3, image_size=24, patch_size=2, hidden_act="gelu", layer_norm_eps=1e-06, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, qkv_bias=True, mlp_ratio=4.0, use_abs_pos=True, use_rel_pos=True, rel_pos_zero_init=False, window_size=14, global_attn_indexes=[2, 5, 8, 11], num_pos_feats=16, mlp_dim=None, batch_size=2, ): self.parent = parent self.hidden_size = hidden_size self.intermediate_size = 
intermediate_size self.projection_dim = projection_dim self.output_channels = output_channels self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.qkv_bias = qkv_bias self.mlp_ratio = mlp_ratio self.use_abs_pos = use_abs_pos self.use_rel_pos = use_rel_pos self.rel_pos_zero_init = rel_pos_zero_init self.window_size = window_size self.global_attn_indexes = global_attn_indexes self.num_pos_feats = num_pos_feats self.mlp_dim = mlp_dim self.batch_size = batch_size # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def get_config(self): return SamHQVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, initializer_factor=self.initializer_factor, output_channels=self.output_channels, qkv_bias=self.qkv_bias, mlp_ratio=self.mlp_ratio, use_abs_pos=self.use_abs_pos, use_rel_pos=self.use_rel_pos, rel_pos_zero_init=self.rel_pos_zero_init, window_size=self.window_size, global_attn_indexes=self.global_attn_indexes, num_pos_feats=self.num_pos_feats, mlp_dim=self.mlp_dim, ) def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def 
create_and_check_model(self, config, pixel_values): model = SamHQVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) output_size = self.image_size // self.patch_size self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_channels, output_size, output_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SamHQVisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as SAM's vision encoder does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (SamHQVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_torchscript = False test_torch_exportable = True def setUp(self): self.model_tester = SamHQVisionModelTester(self) self.config_tester = ConfigTester(self, config_class=SamHQVisionConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="SAM's vision encoder does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True 
expected_attention_shape = ( self.model_tester.batch_size * self.model_tester.num_attention_heads, 196, 196, ) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-4:]), list(expected_attention_shape), ) @unittest.skip(reason="SamVisionModel does not support training") def test_training(self): pass @unittest.skip(reason="SamVisionModel does not support training") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="SamVisionModel does not support training") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Hidden_states is tested in create_and_check_model tests") def test_hidden_states_output(self): pass 
@pytest.mark.torch_compile_test def test_sdpa_can_compile_dynamic(self): self.skipTest(reason="SAM model can't be compiled dynamic yet") class SamHQPromptEncoderTester: def __init__( self, hidden_size=32, input_image_size=24, patch_size=2, mask_input_channels=4, num_point_embeddings=4, hidden_act="gelu", ): self.hidden_size = hidden_size self.input_image_size = input_image_size self.patch_size = patch_size self.mask_input_channels = mask_input_channels self.num_point_embeddings = num_point_embeddings self.hidden_act = hidden_act def get_config(self): return SamHQPromptEncoderConfig( image_size=self.input_image_size, patch_size=self.patch_size, mask_input_channels=self.mask_input_channels, hidden_size=self.hidden_size, num_point_embeddings=self.num_point_embeddings, hidden_act=self.hidden_act, ) def prepare_config_and_inputs(self): dummy_points = floats_tensor([self.batch_size, 3, 2]) config = self.get_config() return config, dummy_points class SamHQMaskDecoderTester: def __init__( self, hidden_size=32, hidden_act="relu", mlp_dim=64, num_hidden_layers=12, num_attention_heads=4, attention_downsample_rate=2, num_multimask_outputs=3, iou_head_depth=3, iou_head_hidden_dim=32, layer_norm_eps=1e-6, vit_dim=36, ): self.hidden_size = hidden_size self.hidden_act = hidden_act self.mlp_dim = mlp_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.attention_downsample_rate = attention_downsample_rate self.num_multimask_outputs = num_multimask_outputs self.iou_head_depth = iou_head_depth self.iou_head_hidden_dim = iou_head_hidden_dim self.layer_norm_eps = layer_norm_eps self.vit_dim = vit_dim def get_config(self): return SamHQMaskDecoderConfig( hidden_size=self.hidden_size, hidden_act=self.hidden_act, mlp_dim=self.mlp_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, attention_downsample_rate=self.attention_downsample_rate, num_multimask_outputs=self.num_multimask_outputs, 
iou_head_depth=self.iou_head_depth, iou_head_hidden_dim=self.iou_head_hidden_dim, layer_norm_eps=self.layer_norm_eps, vit_dim=self.vit_dim, ) def prepare_config_and_inputs(self): config = self.get_config() dummy_inputs = { "image_embedding": floats_tensor([self.batch_size, self.hidden_size]), } return config, dummy_inputs class SamHQModelTester: def __init__( self, parent, hidden_size=36, intermediate_size=72, projection_dim=62, output_channels=32, num_hidden_layers=12, num_attention_heads=4, num_channels=3, image_size=24, patch_size=2, hidden_act="gelu", layer_norm_eps=1e-06, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, qkv_bias=True, mlp_ratio=4.0, use_abs_pos=True, use_rel_pos=True, rel_pos_zero_init=False, window_size=14, global_attn_indexes=[2, 5, 8, 11], num_pos_feats=16, mlp_dim=None, batch_size=2, ): self.parent = parent self.image_size = image_size self.patch_size = patch_size self.output_channels = output_channels self.num_channels = num_channels self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.mlp_ratio = mlp_ratio self.use_abs_pos = use_abs_pos self.use_rel_pos = use_rel_pos self.rel_pos_zero_init = rel_pos_zero_init self.window_size = window_size self.global_attn_indexes = global_attn_indexes self.num_pos_feats = num_pos_feats self.mlp_dim = mlp_dim self.batch_size = batch_size # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 self.prompt_encoder_tester = SamHQPromptEncoderTester() 
self.mask_decoder_tester = SamHQMaskDecoderTester() def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): vision_config = SamHQVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, initializer_factor=self.initializer_factor, output_channels=self.output_channels, qkv_bias=self.qkv_bias, mlp_ratio=self.mlp_ratio, use_abs_pos=self.use_abs_pos, use_rel_pos=self.use_rel_pos, rel_pos_zero_init=self.rel_pos_zero_init, window_size=self.window_size, global_attn_indexes=self.global_attn_indexes, num_pos_feats=self.num_pos_feats, mlp_dim=self.mlp_dim, ) prompt_encoder_config = self.prompt_encoder_tester.get_config() mask_decoder_config = self.mask_decoder_tester.get_config() return SamHQConfig( vision_config=vision_config, prompt_encoder_config=prompt_encoder_config, mask_decoder_config=mask_decoder_config, ) def create_and_check_model(self, config, pixel_values): model = SamHQModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): # Explicitly pass multimask_output=True result = model(pixel_values, multimask_output=True) self.parent.assertEqual(result.iou_scores.shape, (self.batch_size, 1, 3)) self.parent.assertEqual(result.pred_masks.shape[:3], (self.batch_size, 1, 3)) def create_and_check_get_image_features(self, config, pixel_values): model = SamHQModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): image_embeddings = model.get_image_embeddings(pixel_values) self.parent.assertEqual(image_embeddings[0][0].shape, 
(self.output_channels, 12, 12)) def create_and_check_get_image_and_intermediate_embeddings(self, config, pixel_values): model = SamHQModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): image_embeddings, intermediate_embeddings = model.get_image_embeddings(pixel_values) self.parent.assertEqual(image_embeddings[0].shape, (self.output_channels, 12, 12)) self.parent.assertEqual(intermediate_embeddings[0][0].shape, (12, 12, self.hidden_size)) def create_and_check_get_image_intermediate_embeddings(self, config, pixel_values): model = SamHQModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): image_embeddings, intermediate_embeddings = model.get_image_embeddings(pixel_values) self.parent.assertIsInstance(intermediate_embeddings, list) self.parent.assertTrue(len(intermediate_embeddings) > 0) for embedding in intermediate_embeddings: self.parent.assertEqual(embedding.shape, (self.batch_size, 12, 12, self.hidden_size)) def create_and_check_get_image_hidden_states(self, config, pixel_values): model = SamHQModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model.vision_encoder( pixel_values, output_hidden_states=True, return_dict=True, ) # after computing the convolutional features expected_hidden_states_shape = (self.batch_size, 12, 12, 36) self.parent.assertEqual(len(result.hidden_states), self.num_hidden_layers + 1) self.parent.assertEqual(result[-1][0].shape, expected_hidden_states_shape) with torch.no_grad(): result = model.vision_encoder( pixel_values, output_hidden_states=True, ) # after computing the convolutional features expected_hidden_states_shape = (self.batch_size, 12, 12, 36) self.parent.assertEqual(len(result.hidden_states), self.num_hidden_layers + 1) self.parent.assertEqual(result[1][0].shape, expected_hidden_states_shape) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs 
inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SamHQModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as SAM-HQ's vision encoder does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (SamHQModel,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": SamHQModel, "mask-generation": SamHQModel} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_torchscript = False test_cpu_offload = False test_disk_offload_bin = False test_disk_offload_safetensors = False # TODO: Fix me @Arthur: `run_batch_test` in `tests/test_pipeline_mixin.py` not working def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): return True def setUp(self): self.model_tester = SamHQModelTester(self) common_properties = ["initializer_range"] self.config_tester = ConfigTester( self, config_class=SamHQConfig, has_text_modality=False, common_properties=common_properties ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="SAM-HQ's vision encoder does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Compile not yet supported in SamHQ models") def test_sdpa_can_dispatch_on_flash(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*config_and_inputs) def test_get_image_features(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_get_image_features(*config_and_inputs) def test_get_image_and_intermediate_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_get_image_and_intermediate_embeddings(*config_and_inputs) def test_get_image_intermediate_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_get_image_intermediate_embeddings(*config_and_inputs) def test_image_hidden_states(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_get_image_hidden_states(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True expected_vision_attention_shape = ( self.model_tester.batch_size * self.model_tester.num_attention_heads, 196, 196, ) expected_mask_decoder_attention_shape = (self.model_tester.batch_size, 1, 144, 32) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) vision_attentions = outputs.vision_attentions self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers) mask_decoder_attentions = outputs.mask_decoder_attentions self.assertEqual(len(mask_decoder_attentions), self.model_tester.mask_decoder_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True config.vision_config.output_attentions = 
True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) vision_attentions = outputs.vision_attentions self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers) mask_decoder_attentions = outputs.mask_decoder_attentions self.assertEqual(len(mask_decoder_attentions), self.model_tester.mask_decoder_tester.num_hidden_layers) self.assertListEqual( list(vision_attentions[0].shape[-4:]), list(expected_vision_attention_shape), ) self.assertListEqual( list(mask_decoder_attentions[0].shape[-4:]), list(expected_mask_decoder_attention_shape), ) @unittest.skip(reason="SamHQModel does not support training") def test_training(self): pass @unittest.skip(reason="SamHQModel does not support training") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="SamHQModel does not support training") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Hidden_states is tested in create_and_check_model tests") def test_hidden_states_output(self): pass def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None): # Use a slightly higher default tol to make the tests non-flaky super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol=tol, name=name, attributes=attributes) @slow def test_model_from_pretrained(self): model_name = "syscv-community/sam-hq-vit-base" model = SamHQModel.from_pretrained(model_name) 
self.assertIsNotNone(model) @pytest.mark.torch_compile_test def test_sdpa_can_compile_dynamic(self): self.skipTest(reason="SamHQModel can't be compiled dynamic yet") def test_sdpa_can_dispatch_composite_models(self): """ Tests if composite models dispatch correctly on SDPA/eager when requested so when loading the model. This tests only by looking at layer names, as usually SDPA layers are calles "SDPAAttention". In contrast to the above test, this one checks if the "config._attn_implamentation" is a dict after the model is loaded, because we manually replicate requested attn implementation on each sub-config when loading. See https://github.com/huggingface/transformers/pull/32238 for more info The test tries to cover most general cases of composite models, VLMs with vision and text configs. Any model that has a different set of sub-configs has to overwrite this test. """ if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self._is_composite: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname, attn_implementation="sdpa") model_sdpa = model_sdpa.eval().to(torch_device) model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager") model_eager = model_eager.eval().to(torch_device) # Root model determines SDPA support attn_impl = "sdpa" if model._supports_sdpa else "eager" self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") self.assertTrue(model_sdpa.vision_encoder.config._attn_implementation == attn_impl) self.assertTrue(model_sdpa.mask_decoder.config._attn_implementation == attn_impl) self.assertTrue(model_eager.config._attn_implementation == "eager") 
self.assertTrue(model_eager.vision_encoder.config._attn_implementation == "eager") self.assertTrue(model_eager.mask_decoder.config._attn_implementation == "eager") # Verify SDPA/eager layer presence has_sdpa = False for name, submodule in model_sdpa.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: has_sdpa = True break if not has_sdpa and attn_impl == "sdpa": raise ValueError("The SDPA model should have SDPA attention layers") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: raise ValueError("The eager model should not have SDPA attention layers") def prepare_image(): img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image def prepare_dog_img(): img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/dog-sam.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image @slow class SamHQModelIntegrationTest(unittest.TestCase): def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch cleanup(torch_device, gc_collect=True) def test_inference_mask_generation_no_point(self): model = SamHQModel.from_pretrained("syscv-community/sam-hq-vit-base") processor = SamHQProcessor.from_pretrained("syscv-community/sam-hq-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() inputs = processor(images=raw_image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores masks = outputs.pred_masks[0, 0, 0, 0, :3] self.assertTrue(torch.allclose(scores[0][0][-1], torch.tensor(0.4482), atol=2e-4)) expectations = Expectations( { (None, None): [-13.1695, 
-14.6201, -14.8989], ("cuda", 8): [-7.6769, -9.6935, -9.8773], } ) EXPECTED_MASKS = torch.tensor(expectations.get_expectation()).to(torch_device) torch.testing.assert_close(masks, EXPECTED_MASKS, atol=2e-3, rtol=2e-3) def test_inference_mask_generation_one_point_one_bb(self): model = SamHQModel.from_pretrained("syscv-community/sam-hq-vit-base") processor = SamHQProcessor.from_pretrained("syscv-community/sam-hq-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_boxes = [[[650, 900, 1000, 1250]]] input_points = [[[820, 1080]]] inputs = processor( images=raw_image, input_boxes=input_boxes, input_points=input_points, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() masks = outputs.pred_masks[0, 0, 0, 0, :3] torch.testing.assert_close(scores[-1], torch.tensor(0.9700).to(torch_device), atol=2e-4, rtol=2e-4) torch.testing.assert_close( masks, torch.tensor([-9.2033, -8.5505, -7.1361]).to(torch_device), atol=3e-2, rtol=3e-2 ) def test_inference_mask_generation_batched_points_batched_images(self): model = SamHQModel.from_pretrained("syscv-community/sam-hq-vit-base") processor = SamHQProcessor.from_pretrained("syscv-community/sam-hq-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_points = [ [[[820, 1080]], [[820, 1080]], [[820, 1080]], [[820, 1080]]], [[[510, 1080]], [[820, 1080]], [[820, 1080]], [[820, 1080]]], ] inputs = processor(images=[raw_image, raw_image], input_points=input_points, return_tensors="pt").to( torch_device ) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() masks = outputs.pred_masks[0, 0, 0, 0, :3] expectations = Expectations( { (None, None): [ [ [0.9195, 0.8316, 0.6614], [0.9195, 0.8316, 0.6614], [0.9195, 0.8316, 0.6614], [0.9195, 0.8316, 0.6614], ], [ [0.7598, 0.7388, 0.3110], [0.9195, 0.8317, 0.6614], [0.9195, 0.8317, 0.6614], [0.9195, 0.8317, 0.6614], ], ], ("cuda", 8): [ [ 
[0.9195, 0.8316, 0.6614], [0.9195, 0.8316, 0.6614], [0.9195, 0.8316, 0.6614], [0.9195, 0.8316, 0.6614], ], [ [0.7597, 0.7387, 0.3110], [0.9195, 0.8316, 0.6614], [0.9195, 0.8316, 0.6614], [0.9195, 0.8316, 0.6614], ], ], } ) EXPECTED_SCORES = torch.tensor(expectations.get_expectation()).to(torch_device) expectations = Expectations( { (None, None): [-40.2445, -37.4300, -38.1577], ("cuda", 8): [-14.1195, -17.2663, -13.7805], } ) EXPECTED_MASKS = torch.tensor(expectations.get_expectation()).to(torch_device) torch.testing.assert_close(scores, EXPECTED_SCORES, atol=1e-3, rtol=1e-3) torch.testing.assert_close(masks, EXPECTED_MASKS, atol=9e-3, rtol=9e-3) def test_inference_mask_generation_one_point_one_bb_zero(self): model = SamHQModel.from_pretrained("syscv-community/sam-hq-vit-base") processor = SamHQProcessor.from_pretrained("syscv-community/sam-hq-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_boxes = [[[620, 900, 1000, 1255]]] input_points = [[[820, 1080]]] labels = [[0]] inputs = processor( images=raw_image, input_boxes=input_boxes, input_points=input_points, input_labels=labels, return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.8680), atol=1e-3)) def test_inference_mask_generation_with_labels(self): model = SamHQModel.from_pretrained("syscv-community/sam-hq-vit-base") processor = SamHQProcessor.from_pretrained("syscv-community/sam-hq-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_points = [[[400, 650]]] input_labels = [[1]] inputs = processor( images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9137), atol=1e-4)) def test_inference_mask_generation_without_labels(self): 
        # NOTE(review): this chunk begins mid-method — the `def` line of this first
        # test (a single-point mask-generation check) lies before the visible chunk.
        model = SamHQModel.from_pretrained("syscv-community/sam-hq-vit-base")
        processor = SamHQProcessor.from_pretrained("syscv-community/sam-hq-vit-base")
        model.to(torch_device)
        model.eval()
        raw_image = prepare_image()
        input_points = [[[400, 650]]]
        inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        scores = outputs.iou_scores.squeeze()
        # Pin the last (highest-resolution) IoU score against a reference value.
        self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9137), atol=1e-3))

    def test_inference_mask_generation_two_points_with_labels(self):
        """Two foreground points (label 1) on one image; checks the last IoU score."""
        model = SamHQModel.from_pretrained("syscv-community/sam-hq-vit-base")
        processor = SamHQProcessor.from_pretrained("syscv-community/sam-hq-vit-base")
        model.to(torch_device)
        model.eval()
        raw_image = prepare_image()
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        scores = outputs.iou_scores.squeeze()
        self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.8859), atol=1e-3))

    def test_inference_mask_generation_two_points_without_labels(self):
        """Same two points but no explicit labels; expected score matches the labeled case."""
        model = SamHQModel.from_pretrained("syscv-community/sam-hq-vit-base")
        processor = SamHQProcessor.from_pretrained("syscv-community/sam-hq-vit-base")
        model.to(torch_device)
        model.eval()
        raw_image = prepare_image()
        input_points = [[[400, 650], [800, 650]]]
        inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        scores = outputs.iou_scores.squeeze()
        self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.8859), atol=1e-3))

    def test_inference_mask_generation_two_points_batched(self):
        """Batch of two images with ragged point counts; padding via `point_pad_value`."""
        model = SamHQModel.from_pretrained("syscv-community/sam-hq-vit-base")
        processor = SamHQProcessor.from_pretrained("syscv-community/sam-hq-vit-base")
        model.to(torch_device)
        model.eval()
        raw_image = prepare_image()
        input_points = [[[400, 650], [800, 650]], [[400, 650]]]
        input_labels = [[1, 1], [1]]
        inputs = processor(
            images=[raw_image, raw_image],
            input_points=input_points,
            input_labels=input_labels,
            images_kwargs={"point_pad_value": -10},
            return_tensors="pt",
        ).to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        scores = outputs.iou_scores.squeeze()
        self.assertTrue(torch.allclose(scores[0][-1], torch.tensor(0.4482), atol=1e-4))
        self.assertTrue(torch.allclose(scores[1][-1], torch.tensor(0.4482), atol=1e-4))

    def test_inference_mask_generation_one_box(self):
        """Single bounding-box prompt; checks the last IoU score."""
        model = SamHQModel.from_pretrained("syscv-community/sam-hq-vit-base")
        processor = SamHQProcessor.from_pretrained("syscv-community/sam-hq-vit-base")
        model.to(torch_device)
        model.eval()
        raw_image = prepare_image()
        input_boxes = [[[75, 275, 1725, 850]]]
        inputs = processor(images=raw_image, input_boxes=input_boxes, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        scores = outputs.iou_scores.squeeze()
        self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.6265), atol=1e-4))

    def test_inference_mask_generation_batched_image_one_point(self):
        """Batched two-image inference must match single-image inference for the same point."""
        model = SamHQModel.from_pretrained("syscv-community/sam-hq-vit-base")
        processor = SamHQProcessor.from_pretrained("syscv-community/sam-hq-vit-base")
        model.to(torch_device)
        model.eval()
        raw_image = prepare_image()
        raw_dog_image = prepare_dog_img()
        input_points = [[[820, 1080]], [[220, 470]]]
        inputs = processor(images=[raw_image, raw_dog_image], input_points=input_points, return_tensors="pt").to(
            torch_device
        )
        with torch.no_grad():
            outputs = model(**inputs)
        scores_batched = outputs.iou_scores.squeeze()
        input_points = [[[220, 470]]]
        inputs = processor(images=raw_dog_image, input_points=input_points, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        scores_single = outputs.iou_scores.squeeze()
        # Second batch entry (dog image) must agree with the single-image run.
        self.assertTrue(torch.allclose(scores_batched[1, :], scores_single, atol=1e-4))

    def test_inference_mask_generation_two_points_point_batch(self):
        """A point batch of two prompts on one image yields a (1, 2, 3) IoU tensor."""
        model = SamHQModel.from_pretrained("syscv-community/sam-hq-vit-base")
        processor = SamHQProcessor.from_pretrained("syscv-community/sam-hq-vit-base")
        model.to(torch_device)
        model.eval()
        raw_image = prepare_image()
        input_points = torch.Tensor([[[400, 650]], [[220, 470]]]).cpu()  # fmt: skip
        input_points = input_points.unsqueeze(0)
        inputs = processor(raw_image, input_points=input_points, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        iou_scores = outputs.iou_scores.cpu()
        self.assertTrue(iou_scores.shape == (1, 2, 3))
        torch.testing.assert_close(
            iou_scores, torch.tensor([[[0.9889, 0.9508, 0.9137], [0.8070, 0.7934, 0.7932]]]), atol=1e-3, rtol=1e-3
        )

    def test_inference_mask_generation_three_boxes_point_batch(self):
        """Three box prompts (two identical) yield a (1, 3, 3) IoU tensor; duplicates match."""
        model = SamHQModel.from_pretrained("syscv-community/sam-hq-vit-base")
        processor = SamHQProcessor.from_pretrained("syscv-community/sam-hq-vit-base")
        model.to(torch_device)
        model.eval()
        raw_image = prepare_image()
        # fmt: off
        input_boxes = torch.Tensor([[[620, 900, 1000, 1255]], [[75, 275, 1725, 850]], [[75, 275, 1725, 850]]]).cpu()
        EXPECTED_IOU = torch.tensor([[[0.9850, 0.9730, 0.9726],
        [0.8891, 0.8017, 0.6265],
        [0.8891, 0.8017, 0.6265]]])
        # fmt: on
        input_boxes = input_boxes.unsqueeze(0)
        inputs = processor(raw_image, input_boxes=input_boxes, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        iou_scores = outputs.iou_scores.cpu()
        self.assertTrue(iou_scores.shape == (1, 3, 3))
        torch.testing.assert_close(iou_scores, EXPECTED_IOU, atol=1e-4, rtol=1e-4)

    def test_dummy_pipeline_generation(self):
        """Smoke test: the mask-generation pipeline runs end-to-end without error."""
        generator = pipeline("mask-generation", model="syscv-community/sam-hq-vit-base", device=torch_device)
        raw_image = prepare_image()
        _ = generator(raw_image, points_per_batch=64)
transformers/tests/models/sam_hq/test_modeling_sam_hq.py/0
{ "file_path": "transformers/tests/models/sam_hq/test_modeling_sam_hq.py", "repo_id": "transformers", "token_count": 20267 }
587
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch SegGpt model."""

import inspect
import math
import unittest

from datasets import load_dataset

from transformers import SegGptConfig
from transformers.testing_utils import (
    Expectations,
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import SegGptForImageSegmentation, SegGptModel
    from transformers.models.seggpt.modeling_seggpt import SegGptLoss


if is_vision_available():
    from transformers import SegGptImageProcessor


class SegGptModelTester:
    """Builds tiny SegGpt configs and random inputs for the common model tests."""

    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=False,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        mlp_ratio=2.0,
        merge_index=0,
        intermediate_hidden_state_indices=[1],
        pretrain_image_size=10,
        decoder_hidden_size=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.mlp_ratio = mlp_ratio
        self.merge_index = merge_index
        self.intermediate_hidden_state_indices = intermediate_hidden_state_indices
        self.pretrain_image_size = pretrain_image_size
        self.decoder_hidden_size = decoder_hidden_size

        # in SegGpt, the seq length equals the number of patches (we don't use the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches

    def prepare_config_and_inputs(self):
        """Return a config plus random pixel/prompt/mask (and optional label) tensors.

        Inputs use half the image height because SegGpt stacks prompt and input vertically.
        """
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size // 2, self.image_size])
        prompt_pixel_values = floats_tensor(
            [self.batch_size, self.num_channels, self.image_size // 2, self.image_size]
        )
        prompt_masks = floats_tensor([self.batch_size, self.num_channels, self.image_size // 2, self.image_size])

        labels = None
        if self.use_labels:
            labels = floats_tensor([self.batch_size, self.num_channels, self.image_size // 2, self.image_size])

        config = self.get_config()

        return config, pixel_values, prompt_pixel_values, prompt_masks, labels

    def get_config(self):
        return SegGptConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            initializer_range=self.initializer_range,
            mlp_ratio=self.mlp_ratio,
            merge_index=self.merge_index,
            intermediate_hidden_state_indices=self.intermediate_hidden_state_indices,
            pretrain_image_size=self.pretrain_image_size,
            decoder_hidden_size=self.decoder_hidden_size,
        )

    def create_and_check_model(self, config, pixel_values, prompt_pixel_values, prompt_masks, labels):
        """Forward the base model and check the last hidden state's spatial shape."""
        model = SegGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, prompt_pixel_values, prompt_masks)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.image_size // self.patch_size,
                self.image_size // self.patch_size,
                self.hidden_size,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            prompt_pixel_values,
            prompt_masks,
            labels,
        ) = config_and_inputs
        inputs_dict = {
            "pixel_values": pixel_values,
            "prompt_pixel_values": prompt_pixel_values,
            "prompt_masks": prompt_masks,
        }
        return config, inputs_dict


@require_torch
class SegGptModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as SegGpt does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (SegGptModel, SegGptForImageSegmentation) if is_torch_available() else ()
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    test_torch_exportable = True
    pipeline_model_mapping = (
        {"feature-extraction": SegGptModel, "mask-generation": SegGptModel} if is_torch_available() else {}
    )

    def setUp(self):
        self.model_tester = SegGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SegGptConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SegGpt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_get_set_embeddings(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))

    def test_forward_signature(self):
        """The first three forward args must be the pixel/prompt/mask tensors, in order."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values", "prompt_pixel_values", "prompt_masks"]
            self.assertListEqual(arg_names[:3], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        """Hidden states must have one entry per layer (+ embeddings) with patch-grid shape."""

        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            patch_height = patch_width = config.image_size // config.patch_size

            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [patch_height, patch_width, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_batching_equivalence(self):
        """Batched outputs must match single-row outputs (SegGpt needs a custom check)."""

        def recursive_check(batched_object, single_row_object, model_name, key):
            if isinstance(batched_object, (list, tuple)):
                for batched_object_value, single_row_object_value in zip(batched_object, single_row_object):
                    recursive_check(batched_object_value, single_row_object_value, model_name, key)
            else:
                batched_row = batched_object[:1]
                self.assertFalse(
                    torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}"
                )
                self.assertFalse(
                    torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}"
                )
                self.assertFalse(
                    torch.isnan(single_row_object).any(), f"Single row output has `nan` in {model_name} for key={key}"
                )
                self.assertFalse(
                    torch.isinf(single_row_object).any(), f"Single row output has `inf` in {model_name} for key={key}"
                )
                self.assertTrue(
                    torch.max(torch.abs(batched_row - single_row_object)) <= 1e-03,
                    msg=(
                        f"Batched and Single row outputs are not equal in {model_name} for key={key}. "
                        f"Difference={torch.max(torch.abs(batched_row - single_row_object))}."
                    ),
                )

        config, batched_input = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            config.output_hidden_states = True

            model_name = model_class.__name__
            batched_input_prepared = self._prepare_for_class(batched_input, model_class)
            model = model_class(config).to(torch_device).eval()

            batch_size = self.model_tester.batch_size
            single_row_input = {}
            for key, value in batched_input_prepared.items():
                if isinstance(value, torch.Tensor) and value.shape[0] % batch_size == 0:
                    single_batch_shape = value.shape[0] // batch_size
                    single_row_input[key] = value[:single_batch_shape]

            with torch.no_grad():
                model_batched_output = model(**batched_input_prepared)
                model_row_output = model(**single_row_input)

            for key in model_batched_output:
                # the first hidden state in SegGPT has weird hack of adding first half of batch with second half
                if key == "hidden_states":
                    model_batched_output[key] = model_batched_output[key][1:]
                    model_row_output[key] = model_row_output[key][1:]
                recursive_check(model_batched_output[key], model_row_output[key], model_name, key)

    def test_seggpt_loss(self):
        """SegGptLoss on fixed random tensors must reproduce a pinned scalar value."""
        torch.manual_seed(100)
        config = self.model_tester.get_config()

        prompt_masks = torch.rand(1, config.num_channels, config.image_size, config.image_size)
        label = torch.rand(1, config.num_channels, config.image_size, config.image_size)
        pred_masks = torch.rand(1, config.num_channels, config.image_size * 2, config.image_size)
        # seq_len x 2 because the loss concatenates prompt_masks and labels as pred_masks is concatenated
        bool_masked_pos = torch.rand(1, self.model_tester.seq_length * 2) > 0.5

        loss = SegGptLoss(config)
        loss_value = loss(prompt_masks, pred_masks, label, bool_masked_pos)
        expected_loss_value = torch.tensor(0.3340)

        torch.testing.assert_close(loss_value, expected_loss_value, rtol=1e-4, atol=1e-4)

    @slow
    def test_model_from_pretrained(self):
        model_name = "BAAI/seggpt-vit-large"
        model = SegGptModel.from_pretrained(model_name)
        self.assertIsNotNone(model)


def prepare_img():
    """Load the SegGpt example dataset and return RGB images with their RGB masks."""
    ds = load_dataset("EduardoPacheco/seggpt-example-data")["train"]
    images = [image.convert("RGB") for image in ds["image"]]
    masks = [image.convert("RGB") for image in ds["mask"]]
    return images, masks


def prepare_bool_masked_pos(config: SegGptConfig):
    """Build a deterministic boolean patch mask with a 75% masking ratio."""
    num_patches = math.prod([i // config.patch_size for i in config.image_size])
    mask_ratio = 0.75
    torch.manual_seed(2)
    num_masked_patches = int(num_patches * mask_ratio)
    shuffle_idx = torch.randperm(num_patches)
    bool_masked_pos = torch.FloatTensor([0] * (num_patches - num_masked_patches) + [1] * num_masked_patches)[
        shuffle_idx
    ]
    bool_masked_pos = bool_masked_pos.unsqueeze(0).bool()

    return bool_masked_pos


@require_torch
@require_vision
class SegGptModelIntegrationTest(unittest.TestCase):
    """Slow tests running the pretrained BAAI/seggpt-vit-large checkpoint end-to-end."""

    @cached_property
    def default_image_processor(self):
        return SegGptImageProcessor.from_pretrained("BAAI/seggpt-vit-large") if is_vision_available() else None

    @slow
    def test_one_shot_inference(self):
        """One prompt image/mask pair; checks pred-mask shape, a logits slice and the segmented area."""
        model = SegGptForImageSegmentation.from_pretrained("BAAI/seggpt-vit-large").to(torch_device)

        image_processor = self.default_image_processor

        images, masks = prepare_img()
        input_image = images[1]
        prompt_image = images[0]
        prompt_mask = masks[0]

        inputs = image_processor(
            images=input_image,
            prompt_images=prompt_image,
            prompt_masks=prompt_mask,
            return_tensors="pt",
            do_convert_rgb=False,
        )

        inputs = inputs.to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 3, 896, 448))
        self.assertEqual(outputs.pred_masks.shape, expected_shape)

        expectations = Expectations(
            {
                (None, None): [
                    [[-2.1208, -2.1190, -2.1198], [-2.1237, -2.1228, -2.1227], [-2.1232, -2.1226, -2.1228]],
                    [[-2.0405, -2.0396, -2.0403], [-2.0434, -2.0434, -2.0433], [-2.0428, -2.0432, -2.0434]],
                    [[-1.8102, -1.8088, -1.8099], [-1.8131, -1.8126, -1.8129], [-1.8130, -1.8128, -1.8131]],
                ],
                ("cuda", 8): [
                    [[-2.1208, -2.1189, -2.1198], [-2.1236, -2.1229, -2.1230], [-2.1233, -2.1227, -2.1228]],
                    [[-2.0408, -2.0398, -2.0405], [-2.0435, -2.0437, -2.0438], [-2.0431, -2.0435, -2.0436]],
                    [[-1.8101, -1.8086, -1.8098], [-1.8129, -1.8126, -1.8130], [-1.8128, -1.8128, -1.8130]],
                ],
            }
        )
        expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)

        torch.testing.assert_close(outputs.pred_masks[0, :, :3, :3], expected_slice, rtol=2e-4, atol=2e-4)

        result = image_processor.post_process_semantic_segmentation(outputs, [input_image.size[::-1]])[0]

        result_expected_shape = torch.Size((170, 297))
        expected_area = 1082
        area = (result > 0).sum().item()
        self.assertEqual(result.shape, result_expected_shape)
        self.assertEqual(area, expected_area)

    @slow
    def test_few_shot_inference(self):
        """Two prompt pairs with feature ensembling; checks shape and a pinned logits slice."""
        model = SegGptForImageSegmentation.from_pretrained("BAAI/seggpt-vit-large").to(torch_device)
        image_processor = self.default_image_processor

        images, masks = prepare_img()
        input_images = [images[1]] * 2
        prompt_images = [images[0], images[2]]
        prompt_masks = [masks[0], masks[2]]

        inputs = image_processor(
            images=input_images,
            prompt_images=prompt_images,
            prompt_masks=prompt_masks,
            return_tensors="pt",
            do_convert_rgb=False,
        )

        inputs = {k: v.to(torch_device) for k, v in inputs.items()}
        with torch.no_grad():
            outputs = model(**inputs, feature_ensemble=True)

        expected_shape = torch.Size((2, 3, 896, 448))
        expected_slice = torch.tensor(
            [
                [[-2.1201, -2.1192, -2.1189], [-2.1217, -2.1210, -2.1204], [-2.1216, -2.1202, -2.1194]],
                [[-2.0393, -2.0390, -2.0387], [-2.0402, -2.0402, -2.0397], [-2.0400, -2.0394, -2.0388]],
                [[-1.8083, -1.8076, -1.8077], [-1.8105, -1.8102, -1.8099], [-1.8105, -1.8095, -1.8090]],
            ]
        ).to(torch_device)

        self.assertEqual(outputs.pred_masks.shape, expected_shape)
        torch.testing.assert_close(outputs.pred_masks[0, :, 448:451, :3], expected_slice, rtol=4e-4, atol=4e-4)

    @slow
    def test_one_shot_with_label(self):
        """Training-style forward with labels and a bool patch mask; checks the pinned loss."""
        model = SegGptForImageSegmentation.from_pretrained("BAAI/seggpt-vit-large").to(torch_device)

        image_processor = self.default_image_processor

        images, masks = prepare_img()

        input_image = images[1]
        label = masks[1]
        prompt_image = images[0]
        prompt_mask = masks[0]

        inputs = image_processor(
            images=input_image,
            prompt_masks=prompt_mask,
            prompt_images=prompt_image,
            return_tensors="pt",
            do_convert_rgb=False,
        ).to(torch_device)

        labels = image_processor(images=None, prompt_masks=label, return_tensors="pt", do_convert_rgb=False)[
            "prompt_masks"
        ].to(torch_device)

        bool_masked_pos = prepare_bool_masked_pos(model.config).to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs, labels=labels, bool_masked_pos=bool_masked_pos)

        expected_loss = torch.tensor(0.0074).to(torch_device)
        torch.testing.assert_close(outputs.loss, expected_loss, rtol=1e-4, atol=1e-4)
transformers/tests/models/seggpt/test_modeling_seggpt.py/0
{ "file_path": "transformers/tests/models/seggpt/test_modeling_seggpt.py", "repo_id": "transformers", "token_count": 8736 }
588
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch SmolLM3 model."""

import gc
import unittest

import pytest
from packaging import version
from parameterized import parameterized

from transformers import AutoTokenizer, SmolLM3Config, is_torch_available
from transformers.generation.configuration_utils import GenerationConfig
from transformers.testing_utils import (
    backend_empty_cache,
    is_flaky,
    require_bitsandbytes,
    require_flash_attn,
    require_torch,
    slow,
    torch_device,
)
from transformers.utils.import_utils import is_torch_greater_or_equal


if is_torch_available():
    import torch

    from transformers import (
        SmolLM3ForCausalLM,
        SmolLM3ForQuestionAnswering,
        SmolLM3ForSequenceClassification,
        SmolLM3ForTokenClassification,
        SmolLM3Model,
    )


from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
from ...test_modeling_common import (
    TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION,
    ModelTesterMixin,
)


class SmolLM3ModelTester(CausalLMModelTester):
    """Wires the generic causal-LM tester to the SmolLM3 model classes."""

    config_class = SmolLM3Config
    if is_torch_available():
        base_model_class = SmolLM3Model
        causal_lm_class = SmolLM3ForCausalLM
        sequence_class = SmolLM3ForSequenceClassification
        token_class = SmolLM3ForTokenClassification
        question_answering_class = SmolLM3ForQuestionAnswering


@require_torch
class SmolLM3ModelTest(CausalLMModelTest, unittest.TestCase):
    """Common model tests for SmolLM3, driven by the shared causal-LM test suite."""

    all_model_classes = (
        (
            SmolLM3Model,
            SmolLM3ForCausalLM,
            SmolLM3ForSequenceClassification,
            SmolLM3ForTokenClassification,
            SmolLM3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    test_headmasking = False
    test_pruning = False
    model_tester_class = SmolLM3ModelTester
    pipeline_model_mapping = (
        {
            "feature-extraction": SmolLM3Model,
            "text-classification": SmolLM3ForSequenceClassification,
            "token-classification": SmolLM3ForTokenClassification,
            "text-generation": SmolLM3ForCausalLM,
            "question-answering": SmolLM3ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )

    @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
    @is_flaky()
    def test_eager_matches_sdpa_inference(self, *args):
        # flaky test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions
        return getattr(ModelTesterMixin, self._testMethodName)(self)


@require_torch
class SmolLM3IntegrationTest(unittest.TestCase):
    """Slow end-to-end tests against the pretrained HuggingFaceTB/SmolLM3-3B checkpoint."""

    model_id = "HuggingFaceTB/SmolLM3-3B"

    @slow
    def test_model_3b_logits(self):
        """Pin the per-token logit means and the first 30 logits of the first position."""
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = SmolLM3ForCausalLM.from_pretrained(self.model_id, device_map="auto")
        input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
        with torch.no_grad():
            out = model(input_ids).logits.float().cpu()
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[9.3306, 8.1721, 6.4764, 7.6011, 11.1218, 7.5343, 7.1195, 8.0956]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
        # slicing logits[0, 0, 0:30]
        EXPECTED_SLICE = torch.tensor([15.7759, 17.6274, 16.3404, 14.5543, 13.1366, 14.2475, 15.8710, 15.6753, 12.3856, 13.0386, 14.0792, 12.7253, 13.9634, 12.1271, 12.4320, 16.0329, 17.3975, 17.1396, 17.8666, 17.0103, 17.2962, 16.8777, 16.7144, 16.3023, 16.6084, 12.4649, 12.0723, 14.1148, 14.8239, 15.2733])  # fmt: skip
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-4, atol=1e-4)

        del model
        backend_empty_cache(torch_device)
        gc.collect()

    @slow
    def test_model_3b_generation(self):
        """Greedy decoding of a fixed prompt must reproduce the pinned completion."""
        EXPECTED_TEXT_COMPLETION = """Gravity is the force that pulls objects toward the center of the Earth. It is a force that is always present, even"""
        prompt = "Gravity is the force"
        tokenizer = AutoTokenizer.from_pretrained(self.model_id)
        model = SmolLM3ForCausalLM.from_pretrained(self.model_id, device_map="auto")
        input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)

        del model
        backend_empty_cache(torch_device)
        gc.collect()

    @require_bitsandbytes
    @slow
    @require_flash_attn
    @pytest.mark.flash_attn_test
    def test_model_3b_long_prompt(self):
        """Generation (plain and assisted) on a 4097-token prompt above the sliding window."""
        EXPECTED_OUTPUT_TOKEN_IDS = [306, 338]
        # An input with 4097 tokens that is above the size of the sliding window
        input_ids = [1] + [306, 338] * 2048
        model = SmolLM3ForCausalLM.from_pretrained(
            self.model_id,
            device_map="auto",
            load_in_4bit=True,
            attn_implementation="flash_attention_2",
        )
        input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
        generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
        self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())

        # Assisted generation
        assistant_model = model
        assistant_model.generation_config.num_assistant_tokens = 2
        assistant_model.generation_config.num_assistant_tokens_schedule = "constant"
        generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
        self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())

        del assistant_model
        del model
        backend_empty_cache(torch_device)
        gc.collect()

    @pytest.mark.torch_export_test
    @slow
    def test_export_static_cache(self):
        """Export with a static cache via torch.export and check generation still matches."""
        if version.parse(torch.__version__) < version.parse("2.4.0"):
            self.skipTest(reason="This test requires torch >= 2.4 to run.")

        from transformers.integrations.executorch import (
            TorchExportableModuleWithStaticCache,
            convert_and_export_with_cache,
        )

        tokenizer = AutoTokenizer.from_pretrained(
            self.model_id, pad_token="<|finetune_right_pad_id|>", padding_side="right"
        )
        EXPECTED_TEXT_COMPLETION = "Gravity is the force that pulls objects toward the center of the Earth. It is a force that is always present, and"
        max_generation_length = tokenizer(EXPECTED_TEXT_COMPLETION, return_tensors="pt", padding=True)[
            "input_ids"
        ].shape[-1]

        # Load model
        device = "cpu"  # TODO (joao / export experts): should be on `torch_device`, but causes GPU OOM
        dtype = torch.bfloat16
        cache_implementation = "static"
        attn_implementation = "sdpa"
        batch_size = 1
        model = SmolLM3ForCausalLM.from_pretrained(
            self.model_id,
            device_map=device,
            dtype=dtype,
            attn_implementation=attn_implementation,
            generation_config=GenerationConfig(
                use_cache=True,
                cache_implementation=cache_implementation,
                max_length=max_generation_length,
                cache_config={
                    "batch_size": batch_size,
                    "max_cache_len": max_generation_length,
                },
            ),
        )

        prompt = ["Gravity is the force"]
        prompt_tokens = tokenizer(prompt, return_tensors="pt", padding=True).to(model.device)
        prompt_token_ids = prompt_tokens["input_ids"]
        max_new_tokens = max_generation_length - prompt_token_ids.shape[-1]

        # Static Cache + export
        strict = is_torch_greater_or_equal("2.7.0")  # Due to https://github.com/pytorch/pytorch/issues/150994
        exported_program = convert_and_export_with_cache(model, strict=strict)
        ep_generated_ids = TorchExportableModuleWithStaticCache.generate(
            exported_program=exported_program, prompt_token_ids=prompt_token_ids, max_new_tokens=max_new_tokens
        )
        ep_generated_text = tokenizer.batch_decode(ep_generated_ids, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, ep_generated_text)
transformers/tests/models/smollm3/test_modeling_smollm3.py/0
{ "file_path": "transformers/tests/models/smollm3/test_modeling_smollm3.py", "repo_id": "transformers", "token_count": 3954 }
589
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the SpeechT5 processors."""

import json
import os
import shutil
import tempfile
import unittest

from transformers import is_speech_available, is_torch_available
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_speech, require_torch
from transformers.utils import FEATURE_EXTRACTOR_NAME


if is_speech_available() and is_torch_available():
    from transformers import SpeechT5FeatureExtractor, SpeechT5Processor

    from .test_feature_extraction_speecht5 import floats_list


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")


@require_torch
@require_speech
class SpeechT5ProcessorTest(unittest.TestCase):
    """Checks that SpeechT5Processor round-trips save/load and delegates to its
    tokenizer and feature extractor with identical results."""

    @classmethod
    def setUpClass(cls):
        # Serialize a sample tokenizer and a hand-written feature-extractor config
        # into a shared temp dir that the per-test factory methods load from.
        cls.tmpdirname = tempfile.mkdtemp()

        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(cls.tmpdirname)

        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "do_normalize": False,
            "num_mel_bins": 80,
            "hop_length": 16,
            "win_length": 64,
            "win_function": "hann_window",
            "fmin": 80,
            "fmax": 7600,
            "mel_floor": 1e-10,
            "reduction_factor": 2,
            "return_attention_mask": True,
        }

        cls.feature_extraction_file = os.path.join(cls.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(cls.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

    def get_tokenizer(self, **kwargs):
        return SpeechT5Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return SpeechT5FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdirname, ignore_errors=True)

    def test_save_load_pretrained_default(self):
        """Saving and reloading a processor preserves both sub-components."""
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()

        processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = SpeechT5Processor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, SpeechT5Tokenizer)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, SpeechT5FeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        """`from_pretrained` kwargs must override both sub-components' settings."""
        with tempfile.TemporaryDirectory() as tmpdir:
            processor = SpeechT5Processor(
                tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()
            )
            processor.save_pretrained(tmpdir)

            tokenizer_add_kwargs = SpeechT5Tokenizer.from_pretrained(tmpdir, bos_token="(BOS)", eos_token="(EOS)")
            feature_extractor_add_kwargs = SpeechT5FeatureExtractor.from_pretrained(
                tmpdir, do_normalize=False, padding_value=1.0
            )

            processor = SpeechT5Processor.from_pretrained(
                tmpdir, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
            )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, SpeechT5Tokenizer)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, SpeechT5FeatureExtractor)

    def test_feature_extractor(self):
        """Processor audio output must match the feature extractor called directly."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(audio=raw_speech, return_tensors="np")
        input_processor = processor(audio=raw_speech, return_tensors="np")

        for key in input_feat_extract:
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_feature_extractor_target(self):
        """Same as above but through the `audio_target` (spectrogram) path."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(audio_target=raw_speech, return_tensors="np")
        input_processor = processor(audio_target=raw_speech, return_tensors="np")

        for key in input_feat_extract:
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        """Processor text output must match the tokenizer called directly."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str)

        for key in encoded_tok:
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_target(self):
        """Same as above but through the `text_target` path."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"

        encoded_processor = processor(text_target=input_str)

        encoded_tok = tokenizer(input_str)

        for key in encoded_tok:
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        """`batch_decode` must delegate to the tokenizer's `batch_decode`."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
transformers/tests/models/speecht5/test_processing_speecht5.py/0
{ "file_path": "transformers/tests/models/speecht5/test_processing_speecht5.py", "repo_id": "transformers", "token_count": 2818 }
590
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.models.superpoint.modeling_superpoint import SuperPointKeypointDescriptionOutput if is_vision_available(): from transformers import SuperPointImageProcessor if is_torchvision_available(): from transformers import SuperPointImageProcessorFast class SuperPointImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_grayscale=True, ): size = size if size is not None else {"height": 480, "width": 640} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_grayscale = do_grayscale def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_grayscale": self.do_grayscale, } def expected_output_image_shape(self, images): return self.num_channels, self.size["height"], self.size["width"] def prepare_image_inputs(self, 
equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) def prepare_keypoint_detection_output(self, pixel_values): max_number_keypoints = 50 batch_size = len(pixel_values) mask = torch.zeros((batch_size, max_number_keypoints)) keypoints = torch.zeros((batch_size, max_number_keypoints, 2)) scores = torch.zeros((batch_size, max_number_keypoints)) descriptors = torch.zeros((batch_size, max_number_keypoints, 16)) for i in range(batch_size): random_number_keypoints = np.random.randint(0, max_number_keypoints) mask[i, :random_number_keypoints] = 1 keypoints[i, :random_number_keypoints] = torch.rand((random_number_keypoints, 2)) scores[i, :random_number_keypoints] = torch.rand((random_number_keypoints,)) descriptors[i, :random_number_keypoints] = torch.rand((random_number_keypoints, 16)) return SuperPointKeypointDescriptionOutput( loss=None, keypoints=keypoints, scores=scores, descriptors=descriptors, mask=mask, hidden_states=None ) @require_torch @require_vision class SuperPointImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = SuperPointImageProcessor if is_vision_available() else None fast_image_processing_class = SuperPointImageProcessorFast if is_torchvision_available() else None def setUp(self) -> None: super().setUp() self.image_processor_tester = SuperPointImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processing(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, 
"do_rescale")) self.assertTrue(hasattr(image_processing, "rescale_factor")) self.assertTrue(hasattr(image_processing, "do_grayscale")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 480, "width": 640}) image_processor = self.image_processing_class.from_dict( self.image_processor_dict, size={"height": 42, "width": 42} ) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) @unittest.skip(reason="SuperPointImageProcessor is always supposed to return a grayscaled image") def test_call_numpy_4_channels(self): pass def test_input_image_properly_converted_to_grayscale(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs() pre_processed_images = image_processor.preprocess(image_inputs) for image in pre_processed_images["pixel_values"]: if isinstance(image, torch.Tensor): self.assertTrue( torch.all(image[0, ...] == image[1, ...]).item() and torch.all(image[1, ...] == image[2, ...]).item() ) else: self.assertTrue(np.all(image[0, ...] == image[1, ...]) and np.all(image[1, ...] 
== image[2, ...])) @require_torch def test_post_processing_keypoint_detection(self): def check_post_processed_output(post_processed_output, image_size): for post_processed_output, image_size in zip(post_processed_output, image_size): self.assertTrue("keypoints" in post_processed_output) self.assertTrue("descriptors" in post_processed_output) self.assertTrue("scores" in post_processed_output) keypoints = post_processed_output["keypoints"] all_below_image_size = torch.all(keypoints[:, 0] <= image_size[1]) and torch.all( keypoints[:, 1] <= image_size[0] ) all_above_zero = torch.all(keypoints[:, 0] >= 0) and torch.all(keypoints[:, 1] >= 0) self.assertTrue(all_below_image_size) self.assertTrue(all_above_zero) for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs() pre_processed_images = image_processor.preprocess(image_inputs, return_tensors="pt") outputs = self.image_processor_tester.prepare_keypoint_detection_output(**pre_processed_images) tuple_image_sizes = [(image.size[0], image.size[1]) for image in image_inputs] tuple_post_processed_outputs = image_processor.post_process_keypoint_detection(outputs, tuple_image_sizes) check_post_processed_output(tuple_post_processed_outputs, tuple_image_sizes) tensor_image_sizes = torch.tensor([image.size for image in image_inputs]).flip(1) tensor_post_processed_outputs = image_processor.post_process_keypoint_detection( outputs, tensor_image_sizes ) check_post_processed_output(tensor_post_processed_outputs, tensor_image_sizes)
transformers/tests/models/superpoint/test_image_processing_superpoint.py/0
{ "file_path": "transformers/tests/models/superpoint/test_image_processing_superpoint.py", "repo_id": "transformers", "token_count": 3424 }
591
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import os import pickle import tempfile import unittest from transformers import UMT5Config, is_torch_available from transformers.models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from transformers.utils.fx import symbolic_trace from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch import torch.nn.functional as F from transformers import ( AutoTokenizer, UMT5EncoderModel, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5ForSequenceClassification, UMT5ForTokenClassification, UMT5Model, ) # Copied from test.models.t5.test_modeling_t5.T5ModelTester with T5->UMT5 class UMT5ModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=7, # For common tests is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, 
scope=None, decoder_layers=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers def get_large_model_config(self): return UMT5Config.from_pretrained("google/umt5-base") def prepare_inputs_dict( self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones( config.num_decoder_layers, config.num_attention_heads, device=torch_device ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def prepare_config_and_inputs(self): 
input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input input_ids = input_ids.clamp(self.pad_token_id + 2) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1) config = self.get_config() config.encoder_attention_heads = config.num_attention_heads input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids) return config, input_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_pipeline_config(self): return UMT5Config( vocab_size=166, # t5 forces 100 extra tokens d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def get_config(self): return UMT5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, 
num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = UMT5Model(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size)) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(decoder_past), config.num_layers) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0]), 4) def create_and_check_model_fp16_forward( self, config, input_dict, ): model = UMT5Model(config=config).to(torch_device).half().eval() output = model(**input_dict)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) def create_and_check_with_sequence_classification_head( self, config, input_dict, ): labels = torch.tensor([1] * self.batch_size, dtype=torch.long, device=torch_device) model = UMT5ForSequenceClassification(config=config).to(torch_device).eval() outputs = model(**input_dict, labels=labels) # self.parent.assertEqual(len(outputs), 4) 
self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, config.num_labels)) self.parent.assertEqual(outputs["loss"].size(), ()) @require_torch class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (UMT5Model, UMT5ForConditionalGeneration, UMT5ForSequenceClassification, UMT5ForQuestionAnswering) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": UMT5Model, "question-answering": UMT5ForQuestionAnswering, "summarization": UMT5ForConditionalGeneration, "text-classification": UMT5ForSequenceClassification, "text2text-generation": UMT5ForConditionalGeneration, "translation": UMT5ForConditionalGeneration, "zero-shot": UMT5ForSequenceClassification, } if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = False test_pruning = False test_missing_keys = True test_torchscript = True # The small UMT5 model needs higher percentages for CPU/MP tests model_split_percents = [0.5, 0.8, 0.9] def setUp(self): self.model_tester = UMT5ModelTester(self) # `QAPipelineTests` is not working well with slow tokenizers (for some models) and we don't want to touch the file # `src/transformers/data/processors/squad.py` (where this test fails for this model) def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if pipeline_test_case_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"): return True return False def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): if not self.fx_compatible: self.skipTest(reason="torch fx is not compatible with this model") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.return_dict = False for model_class in self.all_model_classes: if model_class.__name__ == "UMT5ForSequenceClassification": continue model = 
model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) try: if model.config.is_encoder_decoder: model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward labels = inputs.get("labels", None) input_names = [ "attention_mask", "decoder_attention_mask", "decoder_input_ids", "input_features", "input_ids", "input_values", ] if labels is not None: input_names.append("labels") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) model_output = model(**filtered_inputs) traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) else: input_names = [ "attention_mask", "bbox", "input_features", "input_ids", "input_values", "pixel_values", "token_type_ids", "visual_feats", "visual_pos", ] labels = inputs.get("labels", None) start_positions = inputs.get("start_positions", None) end_positions = inputs.get("end_positions", None) if labels is not None: input_names.append("labels") if start_positions is not None: input_names.append("start_positions") if end_positions is not None: input_names.append("end_positions") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) if model.__class__.__name__ in set(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values()) and ( not hasattr(model.config, "problem_type") or model.config.problem_type is None ): model.config.problem_type = "single_label_classification" traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) model_output = model(**filtered_inputs) except Exception as e: self.fail(f"Couldn't trace module: {e}") def flatten_output(output): flatten = [] for x in output: if isinstance(x, (tuple, list)): flatten += flatten_output(x) elif not isinstance(x, 
torch.Tensor): continue else: flatten.append(x) return flatten model_output = flatten_output(model_output) traced_output = flatten_output(traced_output) num_outputs = len(model_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], traced_output[i]), f"traced {i}th output doesn't match model {i}th output for {model_class}", ) # Test that the model can be serialized and restored properly with tempfile.TemporaryDirectory() as tmp_dir_name: pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") try: with open(pkl_file_name, "wb") as f: pickle.dump(traced_model, f) with open(pkl_file_name, "rb") as f: loaded = pickle.load(f) except Exception as e: self.fail(f"Couldn't serialize / deserialize the traced model: {e}") loaded_output = loaded(**filtered_inputs) loaded_output = flatten_output(loaded_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], loaded_output[i]), f"serialized model {i}th output doesn't match model {i}th output for {model_class}", ) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. 
# (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() # UMT5ForSequenceClassification does not support inputs_embeds def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering): model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] # overwrite because T5 doesn't accept position ids as input and expects `decoder_input_ids` def test_custom_4d_attention_mask(self): for model_class in self.all_generative_model_classes: config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config).to(device=torch_device, dtype=torch.float32) ( input_ids, _, input_ids_shared_prefix, mask_shared_prefix, _, ) = self._get_custom_4d_mask_test_data() logits = model.forward( decoder_input_ids=input_ids, input_ids=input_dict["input_ids"][:3], ).logits # logits.shape == torch.Size([3, 4, ...]) logits_shared_prefix = model( input_ids=input_dict["input_ids"][:1], decoder_input_ids=input_ids_shared_prefix, decoder_attention_mask=mask_shared_prefix, )[0] # logits_shared_prefix.shape == torch.Size([1, 6, ...]) out_last_tokens = logits[:, -1, :] # last tokens in each batch line out_shared_prefix_last_tokens = logits_shared_prefix[0, -3:, :] # last three tokens # comparing 
softmax-normalized logits: normalized_0 = F.softmax(out_last_tokens) normalized_1 = F.softmax(out_shared_prefix_last_tokens) torch.testing.assert_close(normalized_0, normalized_1, rtol=1e-3, atol=1e-4) def test_with_sequence_classification_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_sequence_classification_head(*config_and_inputs) @unittest.skipIf(torch_device == "cpu", "Can't do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass # Copied from tests.models.t5.test_modeling_t5.T5EncoderOnlyModelTester with T5->UMT5 class UMT5EncoderOnlyModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, # For common tests use_attention_mask=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, is_training=False, dropout_rate=0.1, initializer_factor=0.002, is_encoder_decoder=False, eos_token_id=1, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length # For common tests self.seq_length = self.encoder_seq_length self.use_attention_mask = 
use_attention_mask self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.is_encoder_decoder = is_encoder_decoder self.scope = None self.is_training = is_training def get_large_model_config(self): return UMT5Config.from_pretrained("google-t5/t5-base") def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) config = UMT5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, ) def create_and_check_model( self, config, input_ids, attention_mask, ): model = UMT5EncoderModel(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, attention_mask=attention_mask, ) result = model(input_ids=input_ids) encoder_output = result.last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) def create_and_check_model_fp16_forward( self, config, input_ids, attention_mask, ): model = UMT5EncoderModel(config=config).to(torch_device).half().eval() output = model(input_ids, 
attention_mask=attention_mask)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) def create_and_check_with_token_classification_head( self, config, input_ids, attention_mask, ): labels = torch.tensor([1] * self.seq_length * self.batch_size, dtype=torch.long, device=torch_device) model = UMT5ForTokenClassification(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, labels=labels, attention_mask=attention_mask, ) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.seq_length, config.num_labels)) self.parent.assertEqual(outputs["loss"].size(), ()) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict # Copied from tests.models.t5.test_modeling_t5.T5EncoderOnlyModelTest with T5->UMT5 class UMT5EncoderOnlyModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (UMT5EncoderModel, UMT5ForTokenClassification) if is_torch_available() else () test_pruning = False test_resize_embeddings = False test_model_parallel = True pipeline_model_mapping = ( { "token-classification": UMT5ForTokenClassification, } if is_torch_available() else {} ) all_parallelizable_model_classes = (UMT5EncoderModel,) if is_torch_available() else () def setUp(self): self.model_tester = UMT5EncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=UMT5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skipIf(torch_device == "cpu", "Can't do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) def test_with_token_classification_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_token_classification_head(*config_and_inputs) def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if tokenizer_name is None: return True # `UMT5EncoderOnlyModelTest` is not working well with slow tokenizers (for some models) and we don't want to touch the file # `src/transformers/data/processors/squad.py` (where this test fails for this model) if pipeline_test_case_name == "TokenClassificationPipelineTests" and not tokenizer_name.endswith("Fast"): return True return False @require_torch @require_sentencepiece @require_tokenizers class Umt5IntegrationTest(unittest.TestCase): @slow @unittest.skip( "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. 
Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" ) def test_small_integration_test(self): """ For comparison run the kaggle notebook available here : https://www.kaggle.com/arthurzucker/umt5-inference """ model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device) tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False) input_text = [ "Bonjour monsieur <extra_id_0> bien <extra_id_1>.", "No se como puedo <extra_id_0>.", "This is the reason why we <extra_id_0> them.", "The <extra_id_0> walks in <extra_id_1>, seats", "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.", ] input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids # fmt: off EXPECTED_IDS = torch.tensor( [ [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1], ] ) # fmt: on torch.testing.assert_close(input_ids, EXPECTED_IDS) generated_ids = model.generate(input_ids.to(torch_device)) EXPECTED_FILLING = [ "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>", "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> are not going to be a part of the world. 
We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", ] filling = tokenizer.batch_decode(generated_ids) self.assertEqual(filling, EXPECTED_FILLING)
transformers/tests/models/umt5/test_modeling_umt5.py/0
{ "file_path": "transformers/tests/models/umt5/test_modeling_umt5.py", "repo_id": "transformers", "token_count": 14821 }
592
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch ViTMAE model.""" import math import tempfile import unittest import numpy as np from pytest import mark from transformers import ViTMAEConfig from transformers.testing_utils import ( is_flaky, require_flash_attn, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class ViTMAEModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, mask_ratio=0.5, attn_implementation="eager", ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = 
num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.mask_ratio = mask_ratio self.scope = scope self.attn_implementation = attn_implementation # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1))) self.mask_ratio = mask_ratio self.num_masks = int(mask_ratio * self.seq_length) self.mask_length = num_patches def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return ViTMAEConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, decoder_hidden_size=self.hidden_size, decoder_intermediate_size=self.intermediate_size, decoder_num_attention_heads=self.num_attention_heads, decoder_num_hidden_layers=self.num_hidden_layers, attn_implementation=self.attn_implementation, ) 
def create_and_check_model(self, config, pixel_values, labels): model = ViTMAEModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_pretraining(self, config, pixel_values, labels): model = ViTMAEForPreTraining(config) model.to(torch_device) model.eval() result = model(pixel_values) num_patches = (self.image_size // self.patch_size) ** 2 expected_num_channels = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels)) # test greyscale images config.num_channels = 1 model = ViTMAEForPreTraining(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) expected_num_channels = self.patch_size**2 self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () pipeline_model_mapping = {"image-feature-extraction": ViTMAEModel} if is_torch_available() else {} test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False test_torch_exportable = True def setUp(self): self.model_tester = ViTMAEModelTester(self) self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="ViTMAE does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() # make random mask reproducible torch.manual_seed(2) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) out_2 = outputs[0].cpu().numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname) model.to(torch_device) # make random mask reproducible torch.manual_seed(2) with torch.no_grad(): after_outputs = 
model(**self._prepare_for_class(inputs_dict, model_class)) # Make sure we don't have nans out_1 = after_outputs[0].cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def test_determinism(self): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""") def test_model_outputs_equivalence(self): pass @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass") def test_batching_equivalence(self): pass @slow def test_model_from_pretrained(self): model_name = "google/vit-base-patch16-224" model = ViTMAEModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow @is_flaky() def test_flash_attn_2_inference_equivalence(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") for model_class in self.all_model_classes: if not model_class._supports_flash_attn: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() inputs_dict = self._prepare_for_class(inputs_dict, model_class) inputs_dict["pixel_values"] = inputs_dict["pixel_values"].to(torch.bfloat16) model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16) model.to(torch_device) # ForPretraining model has random `noise` -> need to set seed # to make the test deterministic torch.manual_seed(12345) outputs = model(**inputs_dict, 
output_hidden_states=True) torch.manual_seed(12345) outputs_fa = model_fa(**inputs_dict, output_hidden_states=True) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) # check with inference + dropout model.train() _ = model_fa(**inputs_dict) @unittest.skip("Not applicable for VideoMAE") def test_flash_attn_2_inference_equivalence_right_padding(self): pass def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): # This is an excepton in the module, it's initialized with xavier_uniform without using initializer_range if name.endswith("patch_embeddings.projection.weight"): continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class ViTMAEModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") @cached_property def default_model(self): return ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device) @slow def test_inference_for_pretraining(self): # make random mask reproducible across the PT and TF model np.random.seed(2) model = self.default_model image_processor = self.default_image_processor image = prepare_img() inputs = 
image_processor(images=image, return_tensors="pt").to(torch_device) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) vit_mae_config = ViTMAEConfig() num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2) noise = torch.from_numpy(np.random.uniform(size=(1, num_patches))).to(device=torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs, noise=noise) # verify the logits expected_shape = torch.Size((1, 196, 768)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice.to(torch_device), rtol=1e-4, atol=1e-4) @slow def test_inference_interpolate_pos_encoding(self): # ViTMAE models have an `interpolate_pos_encoding` argument in their forward method, # allowing to interpolate the pre-trained position embeddings in order to use # the model on higher resolutions. The DINO model by Facebook AI leverages this # to visualize self-attention on higher resolution images. 
# make random mask reproducible across the PT and TF model np.random.seed(2) model = self.default_model image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt", do_resize=False).to(torch_device) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) vit_mae_config = ViTMAEConfig() num_patches = (image.height // vit_mae_config.patch_size) * (image.width // vit_mae_config.patch_size) noise = torch.from_numpy(np.random.uniform(size=(1, num_patches))).to(device=torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs, noise=noise, interpolate_pos_encoding=True) # verify the logits expected_shape = torch.Size((1, 1200, 768)) self.assertEqual(outputs.logits.shape, expected_shape) @slow def test_inference_interpolate_pos_encoding_custom_sizes(self): # Ensure custom sizes are correctly handled when interpolating the position embeddings # make random mask reproducible across the PT and TF model np.random.seed(2) model = self.default_model image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt", size={"height": 256, "width": 256}).to( torch_device ) # forward pass with torch.no_grad(): outputs = model( **inputs, interpolate_pos_encoding=True, ) # verify the logits expected_shape = torch.Size((1, 256, 768)) self.assertEqual(outputs.logits.shape, expected_shape)
transformers/tests/models/vit_mae/test_modeling_vit_mae.py/0
{ "file_path": "transformers/tests/models/vit_mae/test_modeling_vit_mae.py", "repo_id": "transformers", "token_count": 7608 }
593
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Wav2Vec2-Conformer model.""" import math import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import Wav2Vec2ConformerConfig, is_torch_available from transformers.testing_utils import ( is_flaky, require_torch, require_torch_accelerator, require_torch_fp16, slow, torch_device, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( Wav2Vec2ConformerForAudioFrameClassification, Wav2Vec2ConformerForCTC, Wav2Vec2ConformerForPreTraining, Wav2Vec2ConformerForSequenceClassification, Wav2Vec2ConformerForXVector, Wav2Vec2ConformerModel, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, ) from transformers.models.wav2vec2.modeling_wav2vec2 import _sample_negative_indices from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer import ( Wav2Vec2ConformerGumbelVectorQuantizer, _compute_mask_indices, ) class Wav2Vec2ConformerModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, # speech is longer is_training=False, hidden_size=16, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 
32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout_prob=0.1, intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, mask_time_prob=0.5, mask_time_length=2, vocab_size=32, do_stable_layer_norm=False, num_adapter_layers=1, adapter_stride=2, tdnn_dim=(32, 32), tdnn_kernel=(5, 3), tdnn_dilation=(1, 2), xvector_output_dim=32, position_embeddings_type="relative", scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.num_adapter_layers = num_adapter_layers self.adapter_stride = adapter_stride self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.scope = scope self.tdnn_dim = tdnn_dim self.tdnn_kernel = tdnn_kernel self.tdnn_dilation = tdnn_dilation self.xvector_output_dim = xvector_output_dim self.position_embeddings_type = position_embeddings_type output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = 
int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length self.adapter_output_seq_length = (self.output_seq_length - 1) // adapter_stride + 1 def prepare_config_and_inputs(self, position_embeddings_type="relative"): input_values = floats_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config(position_embeddings_type=position_embeddings_type) return config, input_values, attention_mask def get_config(self, position_embeddings_type="relative"): return Wav2Vec2ConformerConfig( hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, mask_time_prob=self.mask_time_prob, mask_time_length=self.mask_time_length, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, do_stable_layer_norm=self.do_stable_layer_norm, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, num_adapter_layers=self.num_adapter_layers, adapter_stride=self.adapter_stride, tdnn_dim=self.tdnn_dim, tdnn_kernel=self.tdnn_kernel, tdnn_dilation=self.tdnn_dilation, xvector_output_dim=self.xvector_output_dim, position_embeddings_type=position_embeddings_type, ) def create_and_check_model(self, config, input_values, attention_mask): model = Wav2Vec2ConformerModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, 
(self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_model_with_adapter(self, config, input_values, attention_mask): config.add_adapter = True model = Wav2Vec2ConformerModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, self.hidden_size) ) def create_and_check_model_with_adapter_for_ctc(self, config, input_values, attention_mask): config.add_adapter = True config.output_hidden_size = 2 * config.hidden_size model = Wav2Vec2ConformerForCTC(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.adapter_output_seq_length, self.vocab_size) ) def create_and_check_model_with_adapter_proj_dim(self, config, input_values, attention_mask): config.add_adapter = True config.output_hidden_size = 8 model = Wav2Vec2ConformerModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, config.output_hidden_size), ) def create_and_check_model_float16(self, config, input_values, attention_mask): model = Wav2Vec2ConformerModel(config=config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = Wav2Vec2ConformerModel.from_pretrained(tmpdirname, dtype=torch.float16) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_values.type(dtype=torch.float16), attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def check_ctc_loss(self, config, input_values, *args): model = Wav2Vec2ConformerForCTC(config=config) model.to(torch_device) # make sure that 
dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) self.parent.assertTrue(isinstance(mean_loss, float)) def check_seq_classifier_loss(self, config, input_values, *args): model = Wav2Vec2ConformerForSequenceClassification(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_values, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, float)) self.parent.assertTrue(isinstance(unmasked_loss, float)) self.parent.assertTrue(masked_loss != unmasked_loss) def check_ctc_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Wav2Vec2ConformerForCTC(config=config) model.to(torch_device) model.train() # 
freeze feature encoder model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: # it's important that we make sure that target lengths are at least # one shorter than logit lengths to prevent -inf labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Wav2Vec2ConformerForSequenceClassification(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_xvector_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Wav2Vec2ConformerForXVector(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss 
self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_values, *args): model = Wav2Vec2ConformerForCTC(config) model.to(torch_device) model.train() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with self.parent.assertRaises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class Wav2Vec2ConformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( Wav2Vec2ConformerForCTC, Wav2Vec2ConformerModel, Wav2Vec2ConformerForSequenceClassification, Wav2Vec2ConformerForPreTraining, Wav2Vec2ConformerForAudioFrameClassification, Wav2Vec2ConformerForXVector, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "audio-classification": Wav2Vec2ConformerForSequenceClassification, "automatic-speech-recognition": Wav2Vec2ConformerForCTC, "feature-extraction": Wav2Vec2ConformerModel, } if is_torch_available() else {} ) test_pruning = False test_headmasking = False test_torchscript = False def setUp(self): self.model_tester = Wav2Vec2ConformerModelTester(self) self.config_tester = ConfigTester(self, config_class=Wav2Vec2ConformerConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @is_flaky( description="The `codevector_idx` computed with `argmax()` in `Wav2Vec2ConformerGumbelVectorQuantizer.forward` is not stable." 
) def test_batching_equivalence(self, atol=1e-4, rtol=1e-4): super().test_batching_equivalence(atol=atol, rtol=rtol) def test_model_with_relative(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="relative") self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_rotary(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="rotary") self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_no_rel_pos(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type=None) self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter(*config_and_inputs) def test_model_with_adapter_for_ctc(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_for_ctc(*config_and_inputs) def test_model_with_adapter_proj_dim(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_proj_dim(*config_and_inputs) @require_torch_accelerator @require_torch_fp16 def test_model_float16_with_relative(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="relative") self.model_tester.create_and_check_model_float16(*config_and_inputs) @require_torch_accelerator @require_torch_fp16 def test_model_float16_with_rotary(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="rotary") self.model_tester.create_and_check_model_float16(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_xvector_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_xvector_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) @unittest.skip(reason="Wav2Vec2Conformer has not inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Wav2Vec2Conformer has input_values instead of input_ids") def test_forward_signature(self): pass @unittest.skip(reason="Wav2Vec2Conformer has not token embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Wav2Vec2Conformer has not inputs_embeds") def test_model_get_set_embeddings(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = 
torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "pos_bias_v", "pos_bias_u", "pointwise_conv1", "pointwise_conv2", "feature_projection.projection.weight", "feature_projection.projection.bias", "objective.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "pos_bias_u") and module.pos_bias_u is not None: 
            module.pos_bias_u.data.fill_(3)
        if hasattr(module, "pos_bias_v") and module.pos_bias_v is not None:
            module.pos_bias_v.data.fill_(3)
        if hasattr(module, "codevectors") and module.codevectors is not None:
            module.codevectors.data.fill_(3)
        if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None:
            module.masked_spec_embed.data.fill_(3)

    def test_mask_feature_prob_ctc(self):
        # SpecAugment *feature* masking in train mode must not change the CTC logit shape.
        model = Wav2Vec2ConformerForCTC.from_pretrained(
            "hf-internal-testing/tiny-random-wav2vec2-conformer", mask_feature_prob=0.2, mask_feature_length=2
        )
        model.to(torch_device).train()
        processor = Wav2Vec2Processor.from_pretrained(
            "hf-internal-testing/tiny-random-wav2vec2-conformer", return_attention_mask=True
        )

        batch_duration_in_seconds = [1, 3, 2, 6]
        input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds]

        batch = processor(
            input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt"
        )

        logits = model(
            input_values=batch["input_values"].to(torch_device),
            attention_mask=batch["attention_mask"].to(torch_device),
        ).logits

        # batch of 4, padded to the 6 s clip -> 1498 frames, vocab size 32
        self.assertEqual(logits.shape, (4, 1498, 32))

    def test_mask_time_prob_ctc(self):
        # Same check as above, but with *time* masking instead of feature masking.
        model = Wav2Vec2ConformerForCTC.from_pretrained(
            "hf-internal-testing/tiny-random-wav2vec2-conformer", mask_time_prob=0.2, mask_time_length=2
        )
        model.to(torch_device).train()
        processor = Wav2Vec2Processor.from_pretrained(
            "hf-internal-testing/tiny-random-wav2vec2-conformer", return_attention_mask=True
        )

        batch_duration_in_seconds = [1, 3, 2, 6]
        input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds]

        batch = processor(
            input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt"
        )

        logits = model(
            input_values=batch["input_values"].to(torch_device),
            attention_mask=batch["attention_mask"].to(torch_device),
        ).logits

        self.assertEqual(logits.shape, (4, 1498, 32))

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = Wav2Vec2ConformerModel.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large")
        self.assertIsNotNone(model)


@require_torch
class Wav2Vec2ConformerUtilsTest(unittest.TestCase):
    # Unit tests for the standalone masking / perplexity / negative-sampling helpers.

    def test_compute_mask_indices(self):
        batch_size = 4
        sequence_length = 60
        mask_prob = 0.5
        mask_length = 1

        mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
        mask = torch.from_numpy(mask).to(torch_device)

        # with mask_length == 1 there is no overlap, so the count is exact
        self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)])

    def test_compute_mask_indices_low_prob(self):
        # with these settings num_masked_spans=0.5, which means probabilistic rounding
        # ensures that in 5 out of 10 method calls, num_masked_spans=0, and in
        # the other 5 out of 10, cases num_masked_spans=1
        n_trials = 100
        batch_size = 4
        sequence_length = 100
        mask_prob = 0.05
        mask_length = 10

        count_dimensions_masked = 0
        count_dimensions_not_masked = 0

        for _ in range(n_trials):
            mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
            mask = torch.from_numpy(mask).to(torch_device)

            num_masks = torch.sum(mask).item()

            if num_masks > 0:
                count_dimensions_masked += 1
            else:
                count_dimensions_not_masked += 1

        # as we test for at least 10 masked dimension and at least
        # 10 non-masked dimension, this test could fail with probability:
        # P(100 coin flips, at most 9 heads) = 1.66e-18
        self.assertGreater(count_dimensions_masked, int(n_trials * 0.1))
        self.assertGreater(count_dimensions_not_masked, int(n_trials * 0.1))

    def test_compute_mask_indices_overlap(self):
        batch_size = 4
        sequence_length = 80
        mask_prob = 0.5
        mask_length = 4

        mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
        mask = torch.from_numpy(mask).to(torch_device)

        # because of overlap mask don't have to add up exactly to `mask_prob * sequence_length`, but have to be smaller or equal
        for batch_sum in mask.sum(axis=-1):
            self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)

    def test_compute_mask_indices_attn_mask_overlap(self):
        batch_size = 4
        sequence_length = 80
        mask_prob = 0.5
        mask_length = 4

        attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device)
        attention_mask[:2, sequence_length // 2 :] = 0

        mask = _compute_mask_indices(
            (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask
        )
        mask = torch.from_numpy(mask).to(torch_device)

        for batch_sum in mask.sum(axis=-1):
            self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)

        # padded positions (attention_mask == 0) must never be masked
        self.assertTrue(mask[:2, sequence_length // 2 :].sum() == 0)

    def test_compute_mask_indices_short_audio(self):
        batch_size = 4
        sequence_length = 100
        mask_prob = 0.05
        mask_length = 10

        attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device)
        # force one example to be heavily padded
        attention_mask[0, 5:] = 0

        mask = _compute_mask_indices(
            (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask, min_masks=2
        )

        # make sure that non-padded examples cannot be padded
        self.assertFalse(mask[0][attention_mask[0].to(torch.bool).cpu()].any())

    def test_compute_perplexity(self):
        probs = torch.arange(100, device=torch_device).reshape(2, 5, 10) / 100

        # expected values are regression references for this fixed input
        ppl = Wav2Vec2ConformerGumbelVectorQuantizer._compute_perplexity(probs)
        self.assertTrue(abs(ppl.item() - 141.4291) < 1e-3)

        # mask half of the input
        mask = torch.ones((2,), device=torch_device, dtype=torch.bool)
        mask[0] = 0

        ppl = Wav2Vec2ConformerGumbelVectorQuantizer._compute_perplexity(probs, mask)
        self.assertTrue(abs(ppl.item() - 58.6757) < 1e-3)

    def test_sample_negatives(self):
        batch_size = 2
        sequence_length = 10
        hidden_size = 4
        num_negatives = 3

        features = (torch.arange(sequence_length * hidden_size, device=torch_device) // hidden_size).view(
            sequence_length, hidden_size
        )  # each value in vector consists of same value
        features = features[None, :].expand(batch_size, sequence_length,
hidden_size).contiguous() # sample negative indices sampled_negative_indices = _sample_negative_indices((batch_size, sequence_length), num_negatives, None) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)] negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3) self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size)) # make sure no negatively sampled vector is actually a positive one for negative in negatives: self.assertTrue(((negative - features) == 0).sum() == 0.0) # make sure that full vectors are sampled and not values of vectors => this means that `unique()` yields a single value for `hidden_size` dim self.assertTrue(negatives.unique(dim=-1).shape, (num_negatives, batch_size, sequence_length, 1)) def test_sample_negatives_with_mask(self): batch_size = 2 sequence_length = 10 hidden_size = 4 num_negatives = 3 # second half of last input tensor is padded mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) mask[-1, sequence_length // 2 :] = 0 features = (torch.arange(sequence_length * hidden_size, device=torch_device) // hidden_size).view( sequence_length, hidden_size ) # each value in vector consists of same value features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous() # replace masked feature vectors with -100 to test that those are not sampled features = torch.where(mask[:, :, None].expand(features.shape).bool(), features, -100) # sample negative indices sampled_negative_indices = _sample_negative_indices( (batch_size, sequence_length), num_negatives, mask.cpu().numpy() ) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)] negatives = negatives.view(batch_size, sequence_length, 
-1, hidden_size).permute(2, 0, 1, 3) self.assertTrue((negatives >= 0).all().item()) self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size)) # make sure no negatively sampled vector is actually a positive one for negative in negatives: self.assertTrue(((negative - features) == 0).sum() == 0.0) # make sure that full vectors are sampled and not values of vectors => this means that `unique()` yields a single value for `hidden_size` dim self.assertTrue(negatives.unique(dim=-1).shape, (num_negatives, batch_size, sequence_length, 1)) @require_torch @slow class Wav2Vec2ConformerModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").filter(lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)]) speech_samples = speech_samples[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_inference_ctc_normal_batched_rel_pos(self): model = Wav2Vec2ConformerForCTC.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large-960h-ft") model.to(torch_device) processor = Wav2Vec2Processor.from_pretrained( "facebook/wav2vec2-conformer-rel-pos-large-960h-ft", do_lower_case=True ) input_speech = self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight loincloth that was the only garment he wore", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_ctc_normal_batched_rope(self): model = 
Wav2Vec2ConformerForCTC.from_pretrained("facebook/wav2vec2-conformer-rope-large-960h-ft") model.to(torch_device) processor = Wav2Vec2Processor.from_pretrained( "facebook/wav2vec2-conformer-rope-large-960h-ft", do_lower_case=True ) input_speech = self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_pretrained(self): model = Wav2Vec2ConformerForPreTraining.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large") model.to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-conformer-rel-pos-large", return_attention_mask=True ) input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) batch_size = inputs_dict["input_values"].shape[0] feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1])) features_shape = (batch_size, feature_seq_length) torch.manual_seed(0) mask_time_indices = _compute_mask_indices( features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2, ) mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device) with torch.no_grad(): outputs = model( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), mask_time_indices=mask_time_indices, ) # compute cosine similarity cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1) # retrieve cosine sim of masked features 
cosine_sim_masked = cosine_sim[mask_time_indices] # ... now compare to randomly initialized model config = Wav2Vec2ConformerConfig.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large") model_rand = Wav2Vec2ConformerForPreTraining(config).to(torch_device).eval() with torch.no_grad(): outputs_rand = model_rand( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), mask_time_indices=mask_time_indices, ) # compute cosine similarity cosine_sim_rand = torch.cosine_similarity( outputs_rand.projected_states, outputs_rand.projected_quantized_states, dim=-1 ) # retrieve cosine sim of masked features cosine_sim_masked_rand = cosine_sim_rand[mask_time_indices] # a pretrained wav2vec2_conformer model has learned to predict the quantized latent states # => the cosine similarity between quantized states and predicted states > 0.5 # a random wav2vec2_conformer model has not learned to predict the quantized latent states # => the cosine similarity between quantized states and predicted states is very likely < 0.1 self.assertTrue(cosine_sim_masked.mean().item() - 5 * cosine_sim_masked_rand.mean().item() > 0)
transformers/tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py/0
{ "file_path": "transformers/tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py", "repo_id": "transformers", "token_count": 17473 }
594
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
import re
import tempfile
import unittest

from datasets import Dataset, DatasetDict
from huggingface_hub import hf_hub_download
from packaging import version

from transformers import (
    AutoModelForCausalLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    OPTForCausalLM,
    Trainer,
    TrainingArguments,
    logging,
)
from transformers.testing_utils import (
    CaptureLogger,
    require_bitsandbytes,
    require_peft,
    require_torch,
    require_torch_accelerator,
    slow,
    torch_device,
)
from transformers.utils import check_torch_load_is_safe, is_torch_available


if is_torch_available():
    import torch


@require_peft
@require_torch
class PeftTesterMixin:
    # Hub checkpoints shared by all PEFT integration tests below:
    # a tiny LoRA adapter checkpoint and its tiny base model.
    peft_test_model_ids = ("peft-internal-testing/tiny-OPTForCausalLM-lora",)
    transformers_test_model_ids = ("hf-internal-testing/tiny-random-OPTForCausalLM",)
    transformers_test_model_classes = (AutoModelForCausalLM, OPTForCausalLM)


# TODO: run it with CI after PEFT release.
@slow
class PeftIntegrationTester(unittest.TestCase, PeftTesterMixin):
    """
    A testing suite that makes sure that the PeftModel class is correctly integrated
    into the transformers library.
    """

    def _check_lora_correctly_converted(self, model):
        """
        Utility method to check if the model has correctly adapters injected on it.
        """
        from peft.tuners.tuners_utils import BaseTunerLayer

        is_peft_loaded = False

        # An adapter is considered loaded as soon as one tuner layer is present.
        for _, m in model.named_modules():
            if isinstance(m, BaseTunerLayer):
                is_peft_loaded = True
                break

        return is_peft_loaded

    def _get_bnb_4bit_config(self):
        # Standard NF4 4-bit quantization config used by the quantized tests below.
        return BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")

    def _get_bnb_8bit_config(self):
        return BitsAndBytesConfig(load_in_8bit=True)

    def test_peft_from_pretrained(self):
        """
        Simple test that tests the basic usage of PEFT model through `from_pretrained`.
        This checks if we pass a remote folder that contains an adapter config and adapter weights, it
        should correctly load a model that has adapters injected on it.
        """
        logger = logging.get_logger("transformers.integrations.peft")

        for model_id in self.peft_test_model_ids:
            for transformers_class in self.transformers_test_model_classes:
                with CaptureLogger(logger) as cl:
                    peft_model = transformers_class.from_pretrained(model_id).to(torch_device)
                # ensure that under normal circumstances, there are no warnings about keys
                self.assertNotIn("unexpected keys", cl.out)
                self.assertNotIn("missing keys", cl.out)

                self.assertTrue(self._check_lora_correctly_converted(peft_model))
                self.assertTrue(peft_model._hf_peft_config_loaded)
                # dummy generation
                _ = peft_model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device))

    def test_peft_state_dict(self):
        """
        Simple test that checks if the returned state dict of `get_adapter_state_dict()`
        method contains the expected keys.
        """
        for model_id in self.peft_test_model_ids:
            for transformers_class in self.transformers_test_model_classes:
                peft_model = transformers_class.from_pretrained(model_id).to(torch_device)

                state_dict = peft_model.get_adapter_state_dict()

                # only adapter (LoRA) weights should be returned
                for key in state_dict:
                    self.assertTrue("lora" in key)

    def test_peft_save_pretrained(self):
        """
        Test that checks various combinations of `save_pretrained` with a model that has adapters loaded
        on it. This checks if the saved model contains the expected files (adapter weights and adapter config).
        """
        for model_id in self.peft_test_model_ids:
            for transformers_class in self.transformers_test_model_classes:
                peft_model = transformers_class.from_pretrained(model_id).to(torch_device)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    peft_model.save_pretrained(tmpdirname)

                    # only adapter files must be serialized, never the base model weights
                    self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname))
                    self.assertTrue("adapter_config.json" in os.listdir(tmpdirname))
                    self.assertTrue("config.json" not in os.listdir(tmpdirname))
                    self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname))
                    self.assertTrue("model.safetensors" not in os.listdir(tmpdirname))

                    peft_model = transformers_class.from_pretrained(tmpdirname).to(torch_device)
                    self.assertTrue(self._check_lora_correctly_converted(peft_model))

                    # `safe_serialization=False` falls back to the torch `.bin` format
                    peft_model.save_pretrained(tmpdirname, safe_serialization=False)
                    self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname))
                    self.assertTrue("adapter_config.json" in os.listdir(tmpdirname))

                    peft_model = transformers_class.from_pretrained(tmpdirname).to(torch_device)
                    self.assertTrue(self._check_lora_correctly_converted(peft_model))

    def test_peft_enable_disable_adapters(self):
        """
        A test that checks if `enable_adapters` and `disable_adapters` methods work as expected.
""" from peft import LoraConfig dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) peft_model.add_adapter(peft_config) peft_logits = peft_model(dummy_input).logits peft_model.disable_adapters() peft_logits_disabled = peft_model(dummy_input).logits peft_model.enable_adapters() peft_logits_enabled = peft_model(dummy_input).logits torch.testing.assert_close(peft_logits, peft_logits_enabled, rtol=1e-12, atol=1e-12) self.assertFalse(torch.allclose(peft_logits_enabled, peft_logits_disabled, atol=1e-12, rtol=1e-12)) def test_peft_add_adapter(self): """ Simple test that tests if `add_adapter` works as expected """ from peft import LoraConfig for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) model.add_adapter(peft_config) self.assertTrue(self._check_lora_correctly_converted(model)) # dummy generation _ = model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)) def test_peft_add_adapter_from_pretrained(self): """ Simple test that tests if `add_adapter` works as expected """ from peft import LoraConfig for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) model.add_adapter(peft_config) self.assertTrue(self._check_lora_correctly_converted(model)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_from_pretrained = transformers_class.from_pretrained(tmpdirname).to(torch_device) 
self.assertTrue(self._check_lora_correctly_converted(model_from_pretrained)) def test_peft_add_adapter_modules_to_save(self): """ Simple test that tests if `add_adapter` works as expected when training with modules to save. """ from peft import LoraConfig from peft.utils import ModulesToSaveWrapper for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False, modules_to_save=["lm_head"]) model.add_adapter(peft_config) self._check_lora_correctly_converted(model) _has_modules_to_save_wrapper = False for name, module in model.named_modules(): if isinstance(module, ModulesToSaveWrapper): _has_modules_to_save_wrapper = True self.assertTrue(module.modules_to_save.default.weight.requires_grad) self.assertTrue("lm_head" in name) break self.assertTrue(_has_modules_to_save_wrapper) state_dict = model.get_adapter_state_dict() self.assertTrue("lm_head.weight" in state_dict) logits = model(dummy_input).logits loss = logits.mean() loss.backward() for _, param in model.named_parameters(): if param.requires_grad: self.assertTrue(param.grad is not None) def test_peft_add_adapter_training_gradient_checkpointing(self): """ Simple test that tests if `add_adapter` works as expected when training with gradient checkpointing. """ from peft import LoraConfig for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) model.add_adapter(peft_config) self.assertTrue(self._check_lora_correctly_converted(model)) # When attaching adapters the input embeddings will stay frozen, this will # lead to the output embedding having requires_grad=False. 
dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) frozen_output = model.get_input_embeddings()(dummy_input) self.assertTrue(frozen_output.requires_grad is False) model.gradient_checkpointing_enable() # Since here we attached the hook, the input should have requires_grad to set # properly non_frozen_output = model.get_input_embeddings()(dummy_input) self.assertTrue(non_frozen_output.requires_grad is True) # To repro the Trainer issue dummy_input.requires_grad = False for name, param in model.named_parameters(): if "lora" in name.lower(): self.assertTrue(param.requires_grad) logits = model(dummy_input).logits loss = logits.mean() loss.backward() for name, param in model.named_parameters(): if param.requires_grad: self.assertTrue("lora" in name.lower()) self.assertTrue(param.grad is not None) def test_peft_add_multi_adapter(self): """ Simple test that tests the basic usage of PEFT model through `from_pretrained`. This test tests if add_adapter works as expected in multi-adapter setting. 
""" from peft import LoraConfig from peft.tuners.tuners_utils import BaseTunerLayer dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: is_peft_loaded = False model = transformers_class.from_pretrained(model_id).to(torch_device) logits_original_model = model(dummy_input).logits peft_config = LoraConfig(init_lora_weights=False) model.add_adapter(peft_config) logits_adapter_1 = model(dummy_input) model.add_adapter(peft_config, adapter_name="adapter-2") logits_adapter_2 = model(dummy_input) for _, m in model.named_modules(): if isinstance(m, BaseTunerLayer): is_peft_loaded = True break self.assertTrue(is_peft_loaded) # dummy generation _ = model.generate(input_ids=dummy_input) model.set_adapter("default") self.assertTrue(model.active_adapters() == ["default"]) self.assertTrue(model.active_adapter() == "default") model.set_adapter("adapter-2") self.assertTrue(model.active_adapters() == ["adapter-2"]) self.assertTrue(model.active_adapter() == "adapter-2") # Logits comparison self.assertFalse( torch.allclose(logits_adapter_1.logits, logits_adapter_2.logits, atol=1e-6, rtol=1e-6) ) self.assertFalse(torch.allclose(logits_original_model, logits_adapter_2.logits, atol=1e-6, rtol=1e-6)) model.set_adapter(["adapter-2", "default"]) self.assertTrue(model.active_adapters() == ["adapter-2", "default"]) self.assertTrue(model.active_adapter() == "adapter-2") logits_adapter_mixed = model(dummy_input) self.assertFalse( torch.allclose(logits_adapter_1.logits, logits_adapter_mixed.logits, atol=1e-6, rtol=1e-6) ) self.assertFalse( torch.allclose(logits_adapter_2.logits, logits_adapter_mixed.logits, atol=1e-6, rtol=1e-6) ) # multi active adapter saving not supported with self.assertRaises(ValueError), tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) def test_delete_adapter(self): """ Enhanced test for `delete_adapter` to 
        handle multiple adapters, edge cases, and proper error handling.
        """
        from peft import LoraConfig

        for model_id in self.transformers_test_model_ids:
            for transformers_class in self.transformers_test_model_classes:
                model = transformers_class.from_pretrained(model_id).to(torch_device)

                # Add multiple adapters
                peft_config_1 = LoraConfig(init_lora_weights=False)
                peft_config_2 = LoraConfig(init_lora_weights=False)
                model.add_adapter(peft_config_1, adapter_name="adapter_1")
                model.add_adapter(peft_config_2, adapter_name="adapter_2")

                # Ensure adapters were added
                self.assertIn("adapter_1", model.peft_config)
                self.assertIn("adapter_2", model.peft_config)

                # Delete a single adapter
                model.delete_adapter("adapter_1")
                self.assertNotIn("adapter_1", model.peft_config)
                self.assertIn("adapter_2", model.peft_config)

                # Delete remaining adapter — the peft bookkeeping must be fully removed
                model.delete_adapter("adapter_2")
                self.assertFalse(hasattr(model, "peft_config"))
                self.assertFalse(model._hf_peft_config_loaded)

                # Re-add adapters for edge case tests
                model.add_adapter(peft_config_1, adapter_name="adapter_1")
                model.add_adapter(peft_config_2, adapter_name="adapter_2")

                # Attempt to delete multiple adapters at once
                model.delete_adapter(["adapter_1", "adapter_2"])
                self.assertFalse(hasattr(model, "peft_config"))
                self.assertFalse(model._hf_peft_config_loaded)

                # Test edge cases
                msg = re.escape("No adapter loaded. Please load an adapter first.")
                with self.assertRaisesRegex(ValueError, msg):
                    model.delete_adapter("nonexistent_adapter")

                model.add_adapter(peft_config_1, adapter_name="adapter_1")
                with self.assertRaisesRegex(ValueError, "The following adapter\\(s\\) are not present"):
                    model.delete_adapter("nonexistent_adapter")

                with self.assertRaisesRegex(ValueError, "The following adapter\\(s\\) are not present"):
                    model.delete_adapter(["adapter_1", "nonexistent_adapter"])

                # Deleting with an empty list or None should not raise errors
                model.add_adapter(peft_config_2, adapter_name="adapter_2")
                model.delete_adapter([])  # No-op
                self.assertIn("adapter_1", model.peft_config)
                self.assertIn("adapter_2", model.peft_config)

                # Deleting duplicate adapter names in the list
                model.delete_adapter(["adapter_1", "adapter_1"])
                self.assertNotIn("adapter_1", model.peft_config)
                self.assertIn("adapter_2", model.peft_config)

    @require_torch_accelerator
    @require_bitsandbytes
    def test_peft_from_pretrained_kwargs(self):
        """
        Simple test that tests the basic usage of PEFT model through `from_pretrained` + additional kwargs
        and see if the integration behaves as expected.
        """
        for model_id in self.peft_test_model_ids:
            for transformers_class in self.transformers_test_model_classes:
                bnb_config = self._get_bnb_8bit_config()

                peft_model = transformers_class.from_pretrained(
                    model_id, device_map="auto", quantization_config=bnb_config
                )

                # the base model layers must be replaced by the bnb 8-bit linear layers
                module = peft_model.model.decoder.layers[0].self_attn.v_proj
                self.assertTrue(module.__class__.__name__ == "Linear8bitLt")
                self.assertTrue(peft_model.hf_device_map is not None)

                # dummy generation
                _ = peft_model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device))

    @require_torch_accelerator
    @require_bitsandbytes
    def test_peft_save_quantized(self):
        """
        Simple test that tests the basic usage of PEFT model save_pretrained with quantized base models
        """
        # 4bit
        for model_id in self.peft_test_model_ids:
            for transformers_class in self.transformers_test_model_classes:
                bnb_config = self._get_bnb_4bit_config()

                peft_model = transformers_class.from_pretrained(
                    model_id, device_map="auto", quantization_config=bnb_config
                )

                module = peft_model.model.decoder.layers[0].self_attn.v_proj
                self.assertTrue(module.__class__.__name__ == "Linear4bit")
                self.assertTrue(peft_model.hf_device_map is not None)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    peft_model.save_pretrained(tmpdirname)
                    # only the adapter must be saved — never the quantized base weights
                    self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname))
                    self.assertTrue("adapter_config.json" in os.listdir(tmpdirname))
                    self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname))
                    self.assertTrue("model.safetensors" not in os.listdir(tmpdirname))

        # 8-bit
        for model_id in self.peft_test_model_ids:
            for transformers_class in self.transformers_test_model_classes:
                bnb_config = self._get_bnb_8bit_config()

                peft_model = transformers_class.from_pretrained(
                    model_id, device_map="auto", quantization_config=bnb_config
                )

                module = peft_model.model.decoder.layers[0].self_attn.v_proj
                self.assertTrue(module.__class__.__name__ == "Linear8bitLt")
                self.assertTrue(peft_model.hf_device_map is not None)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    peft_model.save_pretrained(tmpdirname)
                    self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname))
                    self.assertTrue("adapter_config.json" in os.listdir(tmpdirname))
                    self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname))
                    self.assertTrue("model.safetensors" not in os.listdir(tmpdirname))

    @require_torch_accelerator
    @require_bitsandbytes
    def test_peft_save_quantized_regression(self):
        """
        Simple test that tests the basic usage of PEFT model save_pretrained with quantized base models
        Regression test to make sure everything works as expected before the safetensors integration.
        """
        # 4bit
        for model_id in self.peft_test_model_ids:
            for transformers_class in self.transformers_test_model_classes:
                bnb_config = self._get_bnb_4bit_config()

                peft_model = transformers_class.from_pretrained(
                    model_id, device_map="auto", quantization_config=bnb_config
                )

                module = peft_model.model.decoder.layers[0].self_attn.v_proj
                self.assertTrue(module.__class__.__name__ == "Linear4bit")
                self.assertTrue(peft_model.hf_device_map is not None)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    peft_model.save_pretrained(tmpdirname, safe_serialization=False)
                    self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname))
                    self.assertTrue("adapter_config.json" in os.listdir(tmpdirname))
                    self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname))
                    self.assertTrue("model.safetensors" not in os.listdir(tmpdirname))

        # 8-bit
        for model_id in self.peft_test_model_ids:
            for transformers_class in self.transformers_test_model_classes:
                bnb_config = self._get_bnb_8bit_config()

                peft_model = transformers_class.from_pretrained(
                    model_id, device_map="auto", quantization_config=bnb_config
                )

                module = peft_model.model.decoder.layers[0].self_attn.v_proj
                self.assertTrue(module.__class__.__name__ == "Linear8bitLt")
                self.assertTrue(peft_model.hf_device_map is not None)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    peft_model.save_pretrained(tmpdirname,
                        safe_serialization=False)
                    self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname))
                    self.assertTrue("adapter_config.json" in os.listdir(tmpdirname))
                    self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname))
                    self.assertTrue("model.safetensors" not in os.listdir(tmpdirname))

    def test_peft_pipeline(self):
        """
        Simple test that tests the basic usage of PEFT model + pipeline
        """
        from transformers import pipeline

        for adapter_id, base_model_id in zip(self.peft_test_model_ids, self.transformers_test_model_ids):
            peft_pipe = pipeline("text-generation", adapter_id)
            base_pipe = pipeline("text-generation", base_model_id)
            # a model with an adapter attached has extra (LoRA) parameters
            peft_params = list(peft_pipe.model.parameters())
            base_params = list(base_pipe.model.parameters())
            self.assertNotEqual(len(peft_params), len(base_params))
            # Assert we actually loaded the adapter too
            _ = peft_pipe("Hello", max_new_tokens=20)

    def test_peft_add_adapter_with_state_dict(self):
        """
        Simple test that tests the basic usage of PEFT model through `from_pretrained`. This test tests if
        add_adapter works as expected with a state_dict being passed.
        """
        from peft import LoraConfig

        dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)

        for model_id, peft_model_id in zip(self.transformers_test_model_ids, self.peft_test_model_ids):
            for transformers_class in self.transformers_test_model_classes:
                model = transformers_class.from_pretrained(model_id).to(torch_device)

                peft_config = LoraConfig(init_lora_weights=False)

                # passing no checkpoint at all must raise
                with self.assertRaises(ValueError):
                    model.load_adapter(peft_model_id=None)

                state_dict_path = hf_hub_download(peft_model_id, "adapter_model.bin")

                check_torch_load_is_safe()
                dummy_state_dict = torch.load(state_dict_path, weights_only=True)

                model.load_adapter(adapter_state_dict=dummy_state_dict, peft_config=peft_config)
                # a state dict without a peft_config must raise
                with self.assertRaises(ValueError):
                    model.load_adapter(model.load_adapter(adapter_state_dict=dummy_state_dict, peft_config=None))

                self.assertTrue(self._check_lora_correctly_converted(model))
                # dummy generation
                _ = model.generate(input_ids=dummy_input)

    def test_peft_add_adapter_with_state_dict_low_cpu_mem_usage(self):
        """
        Check the usage of low_cpu_mem_usage, which is supported in PEFT >= 0.13.0
        """
        from peft import LoraConfig

        min_version_lcmu = "0.13.0"
        is_lcmu_supported = version.parse(importlib.metadata.version("peft")) >= version.parse(min_version_lcmu)

        for model_id, peft_model_id in zip(self.transformers_test_model_ids, self.peft_test_model_ids):
            for transformers_class in self.transformers_test_model_classes:
                model = transformers_class.from_pretrained(model_id).to(torch_device)
                peft_config = LoraConfig()
                state_dict_path = hf_hub_download(peft_model_id, "adapter_model.bin")
                check_torch_load_is_safe()
                dummy_state_dict = torch.load(state_dict_path, weights_only=True)

                # this should always work
                model.load_adapter(
                    adapter_state_dict=dummy_state_dict, peft_config=peft_config, low_cpu_mem_usage=False
                )

                if is_lcmu_supported:
                    # if supported, this should not raise an error
                    model.load_adapter(
                        adapter_state_dict=dummy_state_dict,
                        adapter_name="other",
                        peft_config=peft_config,
                        low_cpu_mem_usage=True,
                    )
                    # after loading, no meta device should be remaining
                    self.assertFalse(any((p.device.type == "meta") for p in model.parameters()))
                else:
                    err_msg = r"The version of PEFT you are using does not support `low_cpu_mem_usage` yet"
                    with self.assertRaisesRegex(ValueError, err_msg):
                        model.load_adapter(
                            adapter_state_dict=dummy_state_dict,
                            adapter_name="other",
                            peft_config=peft_config,
                            low_cpu_mem_usage=True,
                        )

    def test_peft_from_pretrained_hub_kwargs(self):
        """
        Tests different combinations of PEFT model + from_pretrained + hub kwargs
        """
        peft_model_id = "peft-internal-testing/tiny-opt-lora-revision"

        # This should not work
        with self.assertRaises(OSError):
            _ = AutoModelForCausalLM.from_pretrained(peft_model_id)

        adapter_kwargs = {"revision": "test"}

        # This should work
        model = AutoModelForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs)
        self.assertTrue(self._check_lora_correctly_converted(model))

        model = OPTForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs)
        self.assertTrue(self._check_lora_correctly_converted(model))

        adapter_kwargs = {"revision": "main", "subfolder": "test_subfolder"}

        model = AutoModelForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs)
        self.assertTrue(self._check_lora_correctly_converted(model))

        model = OPTForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs)
        self.assertTrue(self._check_lora_correctly_converted(model))

    def test_peft_from_pretrained_unexpected_keys_warning(self):
        """
        Test for warning when loading a PEFT checkpoint with unexpected keys.
        """
        from peft import LoraConfig

        logger = logging.get_logger("transformers.integrations.peft")

        for model_id, peft_model_id in zip(self.transformers_test_model_ids, self.peft_test_model_ids):
            for transformers_class in self.transformers_test_model_classes:
                model = transformers_class.from_pretrained(model_id).to(torch_device)
                peft_config = LoraConfig()
                state_dict_path = hf_hub_download(peft_model_id, "adapter_model.bin")
                check_torch_load_is_safe()
                dummy_state_dict = torch.load(state_dict_path, weights_only=True)

                # add unexpected key
                dummy_state_dict["foobar"] = next(iter(dummy_state_dict.values()))

                with CaptureLogger(logger) as cl:
                    model.load_adapter(
                        adapter_state_dict=dummy_state_dict, peft_config=peft_config, low_cpu_mem_usage=False
                    )

                msg = "Loading adapter weights from state_dict led to unexpected keys not found in the model: foobar"
                self.assertIn(msg, cl.out)

    def test_peft_from_pretrained_missing_keys_warning(self):
        """
        Test for warning when loading a PEFT checkpoint with missing keys.
        """
        from peft import LoraConfig

        logger = logging.get_logger("transformers.integrations.peft")

        for model_id, peft_model_id in zip(self.transformers_test_model_ids, self.peft_test_model_ids):
            for transformers_class in self.transformers_test_model_classes:
                model = transformers_class.from_pretrained(model_id).to(torch_device)
                peft_config = LoraConfig()
                state_dict_path = hf_hub_download(peft_model_id, "adapter_model.bin")
                check_torch_load_is_safe()
                dummy_state_dict = torch.load(state_dict_path, weights_only=True)

                # remove a key so that we have missing keys
                key = next(iter(dummy_state_dict.keys()))
                del dummy_state_dict[key]

                with CaptureLogger(logger) as cl:
                    model.load_adapter(
                        adapter_state_dict=dummy_state_dict,
                        peft_config=peft_config,
                        low_cpu_mem_usage=False,
                        adapter_name="other",
                    )

                # Here we need to adjust the key name a bit to account for PEFT-specific naming.
                # 1.
Remove PEFT-specific prefix # If merged after dropping Python 3.8, we can use: key = key.removeprefix(peft_prefix) peft_prefix = "base_model.model." key = key[len(peft_prefix) :] # 2. Insert adapter name prefix, _, suffix = key.rpartition(".") key = f"{prefix}.other.{suffix}" msg = f"Loading adapter weights from state_dict led to missing keys in the model: {key}" self.assertIn(msg, cl.out) def test_peft_load_adapter_training_inference_mode_true(self): """ By default, when loading an adapter, the whole model should be in eval mode and no parameter should have requires_grad=False. """ for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname) model = transformers_class.from_pretrained(peft_model.config._name_or_path) model.load_adapter(tmpdirname) assert not any(p.requires_grad for p in model.parameters()) assert not any(m.training for m in model.modules()) del model def test_peft_load_adapter_training_inference_mode_false(self): """ When passing is_trainable=True, the LoRA modules should be in training mode and their parameters should have requires_grad=True. 
""" for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname) model = transformers_class.from_pretrained(peft_model.config._name_or_path) model.load_adapter(tmpdirname, is_trainable=True) for name, module in model.named_modules(): if list(module.children()): # only check leaf modules continue if "lora_" in name: assert module.training assert all(p.requires_grad for p in module.parameters()) else: assert not module.training assert all(not p.requires_grad for p in module.parameters()) def test_prefix_tuning_trainer_load_best_model_at_end_error(self): # Original issue: https://github.com/huggingface/peft/issues/2256 # There is a potential error when using load_best_model_at_end=True with a prompt learning PEFT method. This is # because Trainer uses load_adapter under the hood but with some prompt learning methods, there is an # optimization on the saved model to remove parameters that are not required for inference, which in turn # requires a change to the model architecture. This is why load_adapter will fail in such cases and users should # instead set load_best_model_at_end=False and use PeftModel.from_pretrained. As this is not obvious, we now # intercept the error and add a helpful error message. # This test checks this error message. It also tests the "happy path" (i.e. no error) when using LoRA. 
from peft import LoraConfig, PrefixTuningConfig, TaskType, get_peft_model # create a small sequence classification dataset (binary classification) dataset = [] for i, row in enumerate(os.__doc__.splitlines()): dataset.append({"text": row, "label": i % 2}) ds_train = Dataset.from_list(dataset) ds_valid = ds_train datasets = DatasetDict( { "train": ds_train, "val": ds_valid, } ) # tokenizer for peft-internal-testing/tiny-OPTForCausalLM-lora cannot be loaded, thus using # hf-internal-testing/tiny-random-OPTForCausalLM model_id = "hf-internal-testing/tiny-random-OPTForCausalLM" tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left", model_type="opt") def tokenize_function(examples): return tokenizer(examples["text"], max_length=128, truncation=True, padding="max_length") tokenized_datasets = datasets.map(tokenize_function, batched=True) # lora works, prefix-tuning is expected to raise an error peft_configs = { "lora": LoraConfig(task_type=TaskType.SEQ_CLS), "prefix-tuning": PrefixTuningConfig( task_type=TaskType.SEQ_CLS, inference_mode=False, prefix_projection=True, num_virtual_tokens=10, ), } for peft_type, peft_config in peft_configs.items(): base_model = AutoModelForSequenceClassification.from_pretrained(model_id, num_labels=2) base_model.config.pad_token_id = tokenizer.pad_token_id peft_model = get_peft_model(base_model, peft_config) with tempfile.TemporaryDirectory() as tmpdirname: training_args = TrainingArguments( output_dir=tmpdirname, num_train_epochs=3, eval_strategy="epoch", save_strategy="epoch", load_best_model_at_end=True, ) trainer = Trainer( model=peft_model, args=training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["val"], ) if peft_type == "lora": # LoRA works with load_best_model_at_end trainer.train() else: # prefix tuning does not work, but at least users should get a helpful error message msg = "When using prompt learning PEFT methods such as PREFIX_TUNING" with 
self.assertRaisesRegex(RuntimeError, msg): trainer.train() def test_peft_pipeline_no_warning(self): """ Test to verify that the warning message "The model 'PeftModel' is not supported for text-generation" does not appear when using PeftModel with text-generation pipeline. """ from peft import PeftModel from transformers import pipeline ADAPTER_PATH = "peft-internal-testing/tiny-OPTForCausalLM-lora" BASE_PATH = "hf-internal-testing/tiny-random-OPTForCausalLM" # Input text for testing text = "Who is a Elon Musk?" model = AutoModelForCausalLM.from_pretrained( BASE_PATH, device_map="auto", ) tokenizer = AutoTokenizer.from_pretrained(BASE_PATH) lora_model = PeftModel.from_pretrained( model, ADAPTER_PATH, device_map="auto", ) # Create pipeline with PEFT model while capturing log output # Check that the warning message is not present in the logs pipeline_logger = logging.get_logger("transformers.pipelines.base") with self.assertNoLogs(pipeline_logger, logging.ERROR): lora_generator = pipeline( task="text-generation", model=lora_model, tokenizer=tokenizer, max_length=10, ) # Generate text to verify pipeline works _ = lora_generator(text, max_new_tokens=20)
transformers/tests/peft_integration/test_peft_integration.py/0
{ "file_path": "transformers/tests/peft_integration/test_peft_integration.py", "repo_id": "transformers", "token_count": 18968 }
595
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import datasets from huggingface_hub import ObjectDetectionOutputElement from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( compare_pipeline_output_to_hub_spec, is_pipeline_test, nested_simplify, require_pytesseract, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision @require_timm @require_torch class ObjectDetectionPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING _dataset = None @classmethod def _load_dataset(cls): # Lazy loading of the dataset. Because it is a class method, it will only be loaded once per pytest process. 
if cls._dataset is None: # we use revision="refs/pr/1" until the PR is merged # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1 cls._dataset = datasets.load_dataset( "hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1" ) def get_test_pipeline( self, model, tokenizer=None, image_processor=None, feature_extractor=None, processor=None, dtype="float32", ): object_detector = ObjectDetectionPipeline( model=model, tokenizer=tokenizer, feature_extractor=feature_extractor, image_processor=image_processor, processor=processor, dtype=dtype, ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def run_pipeline_test(self, object_detector, examples): self._load_dataset() outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0) self.assertGreater(len(outputs), 0) for detected_object in outputs: self.assertEqual( detected_object, { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, }, ) batch = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA self._dataset[0]["image"], # LA self._dataset[1]["image"], # L self._dataset[2]["image"], ] batch_outputs = object_detector(batch, threshold=0.0) self.assertEqual(len(batch), len(batch_outputs)) for outputs in batch_outputs: self.assertGreater(len(outputs), 0) for detected_object in outputs: self.assertEqual( detected_object, { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, }, ) compare_pipeline_output_to_hub_spec(detected_object, ObjectDetectionOutputElement) @require_torch def test_small_model_pt(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3" model = AutoModelForObjectDetection.from_pretrained(model_id) feature_extractor = AutoFeatureExtractor.from_pretrained(model_id) 
object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ) outputs = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], threshold=0.0, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ], ) @require_torch @slow def test_large_model_pt(self): model_id = "facebook/detr-resnet-50" model = AutoModelForObjectDetection.from_pretrained(model_id) feature_extractor = AutoFeatureExtractor.from_pretrained(model_id) object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, 
"xmax": 640, "ymax": 368}}, ], ) outputs = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ], ) @require_torch @slow def test_integration_torch_object_detection(self): model_id = "facebook/detr-resnet-50" object_detector = pipeline("object-detection", model=model_id) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 
368}}, ], ) outputs = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ], ) @require_torch @slow def test_threshold(self): threshold = 0.9985 model_id = "facebook/detr-resnet-50" object_detector = pipeline("object-detection", model=model_id) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) @require_torch @require_pytesseract @slow def test_layoutlm(self): model_id = "Narsil/layoutlmv3-finetuned-funsd" threshold = 0.9993 object_detector = pipeline("object-detection", model=model_id, threshold=threshold) outputs = object_detector( 
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, ], )
transformers/tests/pipelines/test_pipelines_object_detection.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_object_detection.py", "repo_id": "transformers", "token_count": 6407 }
596
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import unittest from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, EetqConfig, OPTForCausalLM from transformers.testing_utils import ( backend_empty_cache, require_accelerate, require_eetq, require_torch_gpu, require_torch_multi_gpu, slow, torch_device, ) from transformers.utils import is_accelerate_available, is_torch_available if is_torch_available(): import torch if is_accelerate_available(): from accelerate import init_empty_weights @require_torch_gpu class EetqConfigTest(unittest.TestCase): def test_to_dict(self): """ Simple test that checks if one uses a config and converts it to a dict, the dict is the same as the config object """ quantization_config = EetqConfig() config_to_dict = quantization_config.to_dict() for key in config_to_dict: self.assertEqual(getattr(quantization_config, key), config_to_dict[key]) def test_from_dict(self): """ Simple test that checks if one uses a dict and converts it to a config object, the config object is the same as the dict """ dict = {"modules_to_not_convert": ["lm_head.weight"], "quant_method": "eetq", "weights": "int8"} quantization_config = EetqConfig.from_dict(dict) self.assertEqual(dict["modules_to_not_convert"], quantization_config.modules_to_not_convert) self.assertEqual(dict["quant_method"], quantization_config.quant_method) self.assertEqual(dict["weights"], 
quantization_config.weights) @slow @require_torch_gpu @require_eetq @require_accelerate class EetqTest(unittest.TestCase): model_name = "facebook/opt-350m" input_text = "What are we having for dinner?" max_new_tokens = 9 EXPECTED_OUTPUT = "What are we having for dinner?\nI'm having a steak and a salad" device_map = "cuda" # called only once for all test in this class @classmethod def setUpClass(cls): """ Setup quantized model """ quantization_config = EetqConfig(weights="int8") cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, device_map=cls.device_map, quantization_config=quantization_config ) def tearDown(self): gc.collect() backend_empty_cache(torch_device) gc.collect() def test_quantized_model_conversion(self): """ Simple test that checks if the quantized model has been converted properly """ from eetq import EetqLinear from transformers.integrations import replace_with_eetq_linear model_id = "facebook/opt-350m" config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5") quantization_config = EetqConfig(weights="int8") with init_empty_weights(): model = OPTForCausalLM(config) nb_linears = 0 for module in model.modules(): if isinstance(module, torch.nn.Linear): nb_linears += 1 model = replace_with_eetq_linear(model, quantization_config=quantization_config) nb_eetq_linear = 0 for module in model.modules(): if isinstance(module, EetqLinear): nb_eetq_linear += 1 self.assertEqual(nb_linears - 1, nb_eetq_linear) # Try with `modules_to_not_convert` with init_empty_weights(): model = OPTForCausalLM(config) quantization_config = EetqConfig(modules_to_not_convert=["fc1"]) model = replace_with_eetq_linear(model, quantization_config=quantization_config) nb_eetq_linear = 0 for module in model.modules(): if isinstance(module, EetqLinear): nb_eetq_linear += 1 # 25 corresponds to the lm_head along with 24 fc1 layers. 
self.assertEqual(nb_linears - 25, nb_eetq_linear) def test_quantized_model(self): """ Simple test that checks if the quantized model is working properly """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_save_pretrained(self): """ Simple test that checks if the quantized model is working properly after being saved and loaded """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map) input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @require_torch_multi_gpu def test_quantized_model_multi_gpu(self): """ Simple test that checks if the quantized model is working properly with multiple GPUs set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 GPUs """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantization_config = EetqConfig() quantized_model = AutoModelForCausalLM.from_pretrained( self.model_name, device_map="auto", quantization_config=quantization_config ) self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1}) output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
transformers/tests/quantization/eetq_integration/test_eetq.py/0
{ "file_path": "transformers/tests/quantization/eetq_integration/test_eetq.py", "repo_id": "transformers", "token_count": 2640 }
597
import importlib def is_sagemaker_available(): return importlib.util.find_spec("sagemaker") is not None
transformers/tests/sagemaker/__init__.py/0
{ "file_path": "transformers/tests/sagemaker/__init__.py", "repo_id": "transformers", "token_count": 36 }
598
# Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from parameterized import parameterized from transformers.testing_utils import require_torch, require_vision from transformers.utils.import_utils import is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): import PIL.Image from transformers.image_transforms import ( center_crop, center_to_corners_format, convert_to_rgb, corners_to_center_format, flip_channel_order, get_resize_output_image_size, id_to_rgb, normalize, pad, resize, rgb_to_id, to_channel_dimension_format, to_pil_image, ) def get_random_image(height, width, num_channels=3, channels_first=True): shape = (num_channels, height, width) if channels_first else (height, width, num_channels) random_array = np.random.randint(0, 256, shape, dtype=np.uint8) return random_array @require_vision class ImageTransformsTester(unittest.TestCase): @parameterized.expand( [ ("numpy_float_channels_first", (3, 4, 5), np.float32), ("numpy_float_channels_last", (4, 5, 3), np.float32), ("numpy_float_channels_first", (3, 4, 5), np.float64), ("numpy_float_channels_last", (4, 5, 3), np.float64), ("numpy_int_channels_first", (3, 4, 5), np.int32), ("numpy_uint_channels_first", (3, 4, 5), np.uint8), ] ) @require_vision def test_to_pil_image(self, name, image_shape, dtype): image = np.random.randint(0, 256, image_shape).astype(dtype) pil_image = to_pil_image(image) 
self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # make sure image is correctly rescaled self.assertTrue(np.abs(np.asarray(pil_image)).sum() > 0) @parameterized.expand( [ ("numpy_float_channels_first", (3, 4, 5), np.float32), ("numpy_float_channels_first", (3, 4, 5), np.float64), ("numpy_float_channels_last", (4, 5, 3), np.float32), ("numpy_float_channels_last", (4, 5, 3), np.float64), ] ) @require_vision def test_to_pil_image_from_float(self, name, image_shape, dtype): image = np.random.rand(*image_shape).astype(dtype) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # make sure image is correctly rescaled self.assertTrue(np.abs(np.asarray(pil_image)).sum() > 0) # Make sure that an exception is raised if image is not in [0, 1] image = np.random.randn(*image_shape).astype(dtype) with self.assertRaises(ValueError): to_pil_image(image) @require_vision def test_to_pil_image_from_mask(self): # Make sure binary mask remains a binary mask image = np.random.randint(0, 2, (3, 4, 5)).astype(np.uint8) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) np_img = np.asarray(pil_image) self.assertTrue(np_img.min() == 0) self.assertTrue(np_img.max() == 1) image = np.random.randint(0, 2, (3, 4, 5)).astype(np.float32) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) np_img = np.asarray(pil_image) self.assertTrue(np_img.min() == 0) self.assertTrue(np_img.max() == 1) @require_torch def test_to_pil_image_from_torch(self): # channels first image = torch.rand((3, 4, 5)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # channels last image = torch.rand((4, 5, 3)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) 
self.assertEqual(pil_image.size, (5, 4)) def test_to_channel_dimension_format(self): # Test that function doesn't reorder if channel dim matches the input. image = np.random.rand(3, 4, 5) image = to_channel_dimension_format(image, "channels_first") self.assertEqual(image.shape, (3, 4, 5)) image = np.random.rand(4, 5, 3) image = to_channel_dimension_format(image, "channels_last") self.assertEqual(image.shape, (4, 5, 3)) # Test that function reorders if channel dim doesn't match the input. image = np.random.rand(3, 4, 5) image = to_channel_dimension_format(image, "channels_last") self.assertEqual(image.shape, (4, 5, 3)) image = np.random.rand(4, 5, 3) image = to_channel_dimension_format(image, "channels_first") self.assertEqual(image.shape, (3, 4, 5)) # Can pass in input_data_format and works if data format is ambiguous or unknown. image = np.random.rand(4, 5, 6) image = to_channel_dimension_format(image, "channels_first", input_channel_dim="channels_last") self.assertEqual(image.shape, (6, 4, 5)) def test_get_resize_output_image_size(self): image = np.random.randint(0, 256, (3, 224, 224)) # Test the output size defaults to (x, x) if an int is given. self.assertEqual(get_resize_output_image_size(image, 10), (10, 10)) self.assertEqual(get_resize_output_image_size(image, [10]), (10, 10)) self.assertEqual(get_resize_output_image_size(image, (10,)), (10, 10)) # Test the output size is the same as the input if a two element tuple/list is given. 
self.assertEqual(get_resize_output_image_size(image, (10, 20)), (10, 20)) self.assertEqual(get_resize_output_image_size(image, [10, 20]), (10, 20)) self.assertEqual(get_resize_output_image_size(image, (10, 20), default_to_square=True), (10, 20)) # To match pytorch behaviour, max_size is only relevant if size is an int self.assertEqual(get_resize_output_image_size(image, (10, 20), max_size=5), (10, 20)) # Test output size = (int(size * height / width), size) if size is an int and height > width image = np.random.randint(0, 256, (3, 50, 40)) self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False), (25, 20)) # Test output size = (size, int(size * width / height)) if size is an int and width <= height image = np.random.randint(0, 256, (3, 40, 50)) self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False), (20, 25)) # Test size is resized if longer size > max_size image = np.random.randint(0, 256, (3, 50, 40)) self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False, max_size=22), (22, 17)) # Test output size = (int(size * height / width), size) if size is an int and height > width and # input has 4 channels image = np.random.randint(0, 256, (4, 50, 40)) self.assertEqual( get_resize_output_image_size(image, 20, default_to_square=False, input_data_format="channels_first"), (25, 20), ) # Test correct channel dimension is returned if output size if height == 3 # Defaults to input format - channels first image = np.random.randint(0, 256, (3, 18, 97)) resized_image = resize(image, (3, 20)) self.assertEqual(resized_image.shape, (3, 3, 20)) # Defaults to input format - channels last image = np.random.randint(0, 256, (18, 97, 3)) resized_image = resize(image, (3, 20)) self.assertEqual(resized_image.shape, (3, 20, 3)) image = np.random.randint(0, 256, (3, 18, 97)) resized_image = resize(image, (3, 20), data_format="channels_last") self.assertEqual(resized_image.shape, (3, 20, 3)) image = 
np.random.randint(0, 256, (18, 97, 3)) resized_image = resize(image, (3, 20), data_format="channels_first") self.assertEqual(resized_image.shape, (3, 3, 20)) def test_resize(self): image = np.random.randint(0, 256, (3, 224, 224)) # Check the channel order is the same by default resized_image = resize(image, (30, 40)) self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (3, 30, 40)) # Check channel order is changed if specified resized_image = resize(image, (30, 40), data_format="channels_last") self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (30, 40, 3)) # Check PIL.Image.Image is returned if return_numpy=False resized_image = resize(image, (30, 40), return_numpy=False) self.assertIsInstance(resized_image, PIL.Image.Image) # PIL size is in (width, height) order self.assertEqual(resized_image.size, (40, 30)) # Check an image with float values between 0-1 is returned with values in this range image = np.random.rand(3, 224, 224) resized_image = resize(image, (30, 40)) self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (3, 30, 40)) self.assertTrue(np.all(resized_image >= 0)) self.assertTrue(np.all(resized_image <= 1)) # Check that an image with 4 channels is resized correctly image = np.random.randint(0, 256, (4, 224, 224)) resized_image = resize(image, (30, 40), input_data_format="channels_first") self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (4, 30, 40)) def test_normalize(self): image = np.random.randint(0, 256, (224, 224, 3)) / 255 # Test that exception is raised if inputs are incorrect # Not a numpy array image with self.assertRaises(TypeError): normalize(5, 5, 5) # Number of mean values != number of channels with self.assertRaises(ValueError): normalize(image, mean=(0.5, 0.6), std=1) # Number of std values != number of channels with self.assertRaises(ValueError): normalize(image, mean=1, std=(0.5, 0.6)) # Test 
result is correct - output data format is channels_first and normalization # correctly computed mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) expected_image = ((image - mean) / std).transpose((2, 0, 1)) normalized_image = normalize(image, mean=mean, std=std, data_format="channels_first") self.assertIsInstance(normalized_image, np.ndarray) self.assertEqual(normalized_image.shape, (3, 224, 224)) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) # Test image with 4 channels is normalized correctly image = np.random.randint(0, 256, (224, 224, 4)) / 255 mean = (0.5, 0.6, 0.7, 0.8) std = (0.1, 0.2, 0.3, 0.4) expected_image = (image - mean) / std self.assertTrue( np.allclose( normalize(image, mean=mean, std=std, input_data_format="channels_last"), expected_image, atol=1e-6 ) ) # Test float32 image input keeps float32 dtype image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32) / 255 mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) expected_image = ((image - mean) / std).astype(np.float32) normalized_image = normalize(image, mean=mean, std=std) self.assertEqual(normalized_image.dtype, np.float32) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) # Test float16 image input keeps float16 dtype image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float16) / 255 mean = np.array((0.5, 0.6, 0.7)) std = np.array((0.1, 0.2, 0.3)) # The mean and std are cast to match the dtype of the input image cast_mean = np.array(mean, dtype=np.float16) cast_std = np.array(std, dtype=np.float16) expected_image = (image - cast_mean) / cast_std normalized_image = normalize(image, mean=mean, std=std) self.assertEqual(normalized_image.dtype, np.float16) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) # Test int image input is converted to float32 image = np.random.randint(0, 2, (224, 224, 3), dtype=np.uint8) mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) expected_image = (image.astype(np.float32) - mean) / std 
normalized_image = normalize(image, mean=mean, std=std) self.assertEqual(normalized_image.dtype, np.float32) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) def test_center_crop(self): image = np.random.randint(0, 256, (3, 224, 224)) # Test that exception is raised if inputs are incorrect with self.assertRaises(ValueError): center_crop(image, 10) # Test result is correct - output data format is channels_first and center crop # correctly computed expected_image = image[:, 52:172, 82:142].transpose(1, 2, 0) cropped_image = center_crop(image, (120, 60), data_format="channels_last") self.assertIsInstance(cropped_image, np.ndarray) self.assertEqual(cropped_image.shape, (120, 60, 3)) self.assertTrue(np.allclose(cropped_image, expected_image)) # Test that image is padded with zeros if crop size is larger than image size expected_image = np.zeros((300, 260, 3)) expected_image[38:262, 18:242, :] = image.transpose((1, 2, 0)) cropped_image = center_crop(image, (300, 260), data_format="channels_last") self.assertIsInstance(cropped_image, np.ndarray) self.assertEqual(cropped_image.shape, (300, 260, 3)) self.assertTrue(np.allclose(cropped_image, expected_image)) # Test that odd numbered padding requirement still leads to correct output dimensions cropped_image = center_crop(image, (300, 259), data_format="channels_last") self.assertEqual(cropped_image.shape, (300, 259, 3)) # Test image with 4 channels is cropped correctly image = np.random.randint(0, 256, (224, 224, 4)) expected_image = image[52:172, 82:142, :] self.assertTrue(np.allclose(center_crop(image, (120, 60), input_data_format="channels_last"), expected_image)) def test_center_to_corners_format(self): bbox_center = np.array([[10, 20, 4, 8], [15, 16, 3, 4]]) expected = np.array([[8, 16, 12, 24], [13.5, 14, 16.5, 18]]) self.assertTrue(np.allclose(center_to_corners_format(bbox_center), expected)) # Check that the function and inverse function are inverse of each other 
self.assertTrue(np.allclose(corners_to_center_format(center_to_corners_format(bbox_center)), bbox_center)) def test_corners_to_center_format(self): bbox_corners = np.array([[8, 16, 12, 24], [13.5, 14, 16.5, 18]]) expected = np.array([[10, 20, 4, 8], [15, 16, 3, 4]]) self.assertTrue(np.allclose(corners_to_center_format(bbox_corners), expected)) # Check that the function and inverse function are inverse of each other self.assertTrue(np.allclose(center_to_corners_format(corners_to_center_format(bbox_corners)), bbox_corners)) def test_rgb_to_id(self): # test list input rgb = [125, 4, 255] self.assertEqual(rgb_to_id(rgb), 16712829) # test numpy array input color = np.array( [ [ [213, 54, 165], [88, 207, 39], [156, 108, 128], ], [ [183, 194, 46], [137, 58, 88], [114, 131, 233], ], ] ) expected = np.array([[10827477, 2608984, 8416412], [3064503, 5782153, 15303538]]) self.assertTrue(np.allclose(rgb_to_id(color), expected)) def test_id_to_rgb(self): # test int input self.assertEqual(id_to_rgb(16712829), [125, 4, 255]) # test array input id_array = np.array([[10827477, 2608984, 8416412], [3064503, 5782153, 15303538]]) color = np.array( [ [ [213, 54, 165], [88, 207, 39], [156, 108, 128], ], [ [183, 194, 46], [137, 58, 88], [114, 131, 233], ], ] ) self.assertTrue(np.allclose(id_to_rgb(id_array), color)) def test_pad(self): # fmt: off image = np.array([[ [0, 1], [2, 3], ]]) # fmt: on # Test that exception is raised if unknown padding mode is specified with self.assertRaises(ValueError): pad(image, 10, mode="unknown") # Test that exception is raised if invalid padding is specified with self.assertRaises(ValueError): # Cannot pad on channel dimension pad(image, (5, 10, 10)) # Test image is padded equally on all sides is padding is an int # fmt: off expected_image = np.array([ [[0, 0, 0, 0], [0, 0, 1, 0], [0, 2, 3, 0], [0, 0, 0, 0]], ]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, 1))) # Test the left and right of each axis is padded (pad_left, pad_right) # 
fmt: off expected_image = np.array( [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 2, 3, 0], [0, 0, 0, 0, 0]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, (2, 1)))) # Test only one axis is padded (pad_left, pad_right) # fmt: off expected_image = np.array([[ [9, 9], [9, 9], [0, 1], [2, 3], [9, 9] ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((2, 1), (0, 0)), constant_values=9))) # Test padding with a constant value # fmt: off expected_image = np.array([[ [8, 8, 0, 1, 9], [8, 8, 2, 3, 9], [8, 8, 7, 7, 9], [8, 8, 7, 7, 9] ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), constant_values=((6, 7), (8, 9))))) # fmt: off image = np.array([[ [0, 1, 2], [3, 4, 5], [6, 7, 8], ]]) # fmt: on # Test padding with PaddingMode.REFLECT # fmt: off expected_image = np.array([[ [2, 1, 0, 1, 2, 1], [5, 4, 3, 4, 5, 4], [8, 7, 6, 7, 8, 7], [5, 4, 3, 4, 5, 4], [2, 1, 0, 1, 2, 1], ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="reflect"))) # Test padding with PaddingMode.REPLICATE # fmt: off expected_image = np.array([[ [0, 0, 0, 1, 2, 2], [3, 3, 3, 4, 5, 5], [6, 6, 6, 7, 8, 8], [6, 6, 6, 7, 8, 8], [6, 6, 6, 7, 8, 8], ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="replicate"))) # Test padding with PaddingMode.SYMMETRIC # fmt: off expected_image = np.array([[ [1, 0, 0, 1, 2, 2], [4, 3, 3, 4, 5, 5], [7, 6, 6, 7, 8, 8], [7, 6, 6, 7, 8, 8], [4, 3, 3, 4, 5, 5], ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="symmetric"))) # Test we can specify the output data format # Test padding with PaddingMode.REFLECT # fmt: off image = np.array([[ [0, 1], [2, 3], ]]) expected_image = np.array([ [[0], [1], [0], [1], [0]], [[2], [3], [2], [3], [2]], [[0], [1], [0], [1], [0]], [[2], [3], [2], [3], [2]] ]) # fmt: on self.assertTrue( np.allclose(expected_image, pad(image, ((0, 2), (2, 
1)), mode="reflect", data_format="channels_last")) ) # Test we can pad on an image with 2 channels # fmt: off image = np.array([ [[0, 1], [2, 3]], ]) expected_image = np.array([ [[0, 0], [0, 1], [2, 3]], [[0, 0], [0, 0], [0, 0]], ]) # fmt: on self.assertTrue( np.allclose( expected_image, pad(image, ((0, 1), (1, 0)), mode="constant", input_data_format="channels_last") ) ) # Test that padding works on batched images image = np.array( [ [[0, 1], [2, 3]], ] )[None, ...] expected_image = np.array( [ [[0, 0], [0, 1], [2, 3]], [[0, 0], [0, 0], [0, 0]], ] )[None, ...] # fmt: on self.assertTrue( np.allclose( expected_image, pad(image, ((0, 1), (1, 0)), mode="constant", input_data_format="channels_last") ) ) @require_vision def test_convert_to_rgb(self): # Test that an RGBA image is converted to RGB image = np.array([[[1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.uint8) pil_image = PIL.Image.fromarray(image) self.assertEqual(pil_image.mode, "RGBA") self.assertEqual(pil_image.size, (2, 1)) # For the moment, numpy images are returned as is rgb_image = convert_to_rgb(image) self.assertEqual(rgb_image.shape, (1, 2, 4)) self.assertTrue(np.allclose(rgb_image, image)) # And PIL images are converted rgb_image = convert_to_rgb(pil_image) self.assertEqual(rgb_image.mode, "RGB") self.assertEqual(rgb_image.size, (2, 1)) self.assertTrue(np.allclose(np.array(rgb_image), np.array([[[1, 2, 3], [5, 6, 7]]], dtype=np.uint8))) # Test that a grayscale image is converted to RGB image = np.array([[0, 255]], dtype=np.uint8) pil_image = PIL.Image.fromarray(image) self.assertEqual(pil_image.mode, "L") self.assertEqual(pil_image.size, (2, 1)) rgb_image = convert_to_rgb(pil_image) self.assertEqual(rgb_image.mode, "RGB") self.assertEqual(rgb_image.size, (2, 1)) self.assertTrue(np.allclose(np.array(rgb_image), np.array([[[0, 0, 0], [255, 255, 255]]], dtype=np.uint8))) def test_flip_channel_order(self): # fmt: off img_channels_first = np.array([ [[ 0, 1, 2, 3], [ 4, 5, 6, 7]], [[ 8, 9, 10, 11], [12, 13, 14, 
15]], [[16, 17, 18, 19], [20, 21, 22, 23]], ]) # fmt: on img_channels_last = np.moveaxis(img_channels_first, 0, -1) # fmt: off flipped_img_channels_first = np.array([ [[16, 17, 18, 19], [20, 21, 22, 23]], [[ 8, 9, 10, 11], [12, 13, 14, 15]], [[ 0, 1, 2, 3], [ 4, 5, 6, 7]], ]) # fmt: on flipped_img_channels_last = np.moveaxis(flipped_img_channels_first, 0, -1) self.assertTrue(np.allclose(flip_channel_order(img_channels_first), flipped_img_channels_first)) self.assertTrue( np.allclose(flip_channel_order(img_channels_first, "channels_last"), flipped_img_channels_last) ) self.assertTrue(np.allclose(flip_channel_order(img_channels_last), flipped_img_channels_last)) self.assertTrue( np.allclose(flip_channel_order(img_channels_last, "channels_first"), flipped_img_channels_first) ) # Can flip when the image has 2 channels # fmt: off img_channels_first = np.array([ [[ 0, 1, 2, 3], [ 4, 5, 6, 7]], [[ 8, 9, 10, 11], [12, 13, 14, 15]], ]) # fmt: on flipped_img_channels_first = img_channels_first[::-1, :, :] self.assertTrue( np.allclose( flip_channel_order(img_channels_first, input_data_format="channels_first"), flipped_img_channels_first ) )
transformers/tests/test_image_transforms.py/0
{ "file_path": "transformers/tests/test_image_transforms.py", "repo_id": "transformers", "token_count": 12214 }
599
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np

from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
    TestCasePlus,
    backend_device_count,
    execute_subprocess_async,
    get_torch_dist_unique_port,
    require_torch_multi_accelerator,
    run_first,
    torch_device,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging


logger = logging.get_logger(__name__)


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset, IterableDataset

    from transformers import Trainer

    class DummyDataset(Dataset):
        """Map-style dataset whose i-th item is simply the integer i (to verify sample order)."""

        def __init__(self, length: int = 101):
            self.length = length

        def __len__(self):
            return self.length

        def __getitem__(self, i) -> int:
            return i

    class DummyDataCollator:
        """Turn a list of ints into identical `input_ids`/`labels` tensors."""

        def __call__(self, features):
            return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}

    class DummyModel(nn.Module):
        """Identity model: echoes `input_ids` as predictions, zero loss when labels are given."""

        def __init__(self):
            super().__init__()
            # Add some (unused) params otherwise DDP will complain.
            self.fc = nn.Linear(120, 80)

        def forward(self, input_ids, labels=None):
            if labels is not None:
                return torch.tensor(0.0, device=input_ids.device), input_ids
            else:
                return input_ids

    class RegressionModel(nn.Module):
        """Learnable y = a * x + b with MSE loss; optionally returns the output twice."""

        def __init__(self, a=0, b=0, double_output=False):
            super().__init__()
            self.a = nn.Parameter(torch.tensor(a).float())
            self.b = nn.Parameter(torch.tensor(b).float())
            self.double_output = double_output
            self.config = None

        def forward(self, input_x, labels=None, **kwargs):
            y = input_x * self.a + self.b
            if labels is None:
                return (y, y) if self.double_output else (y,)
            loss = nn.functional.mse_loss(y, labels)
            return (loss, y, y) if self.double_output else (loss, y)

    class SampleIterableDataset(IterableDataset):
        """Iterable wrapper around `RegressionDataset`."""

        def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
            self.dataset = RegressionDataset(a=a, b=b, length=length, seed=seed, label_names=label_names)

        def __iter__(self):
            for i in range(len(self.dataset)):
                yield self.dataset[i]

    class FiniteIterableDataset(SampleIterableDataset):
        """Iterable dataset that is exhausted after a single pass (cursor is not reset)."""

        def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
            super().__init__(a, b, length, seed, label_names)
            self.current_sample = 0

        def __iter__(self):
            while self.current_sample < len(self.dataset):
                yield self.dataset[self.current_sample]
                self.current_sample += 1

    class RegressionDataset:
        """Synthetic noisy-linear data y ~= a * x + b with one target per label name."""

        def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
            np.random.seed(seed)
            self.label_names = ["labels"] if label_names is None else label_names
            self.length = length
            self.x = np.random.normal(size=(length,)).astype(np.float32)
            self.ys = [a * self.x + b + np.random.normal(scale=0.1, size=(length,)) for _ in self.label_names]
            self.ys = [y.astype(np.float32) for y in self.ys]

        def __len__(self):
            return self.length

        def __getitem__(self, i):
            result = {name: y[i] for name, y in zip(self.label_names, self.ys)}
            result["input_x"] = self.x[i]
            return result


class TestTrainerDistributed(TestCasePlus):
    @run_first
    @require_torch_multi_accelerator
    def test_trainer(self):
        # Re-launch this very file under torchrun so the `__main__` block below
        # runs once per accelerator.
        distributed_args = f"""--nproc_per_node={backend_device_count(torch_device)}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir} --report_to none".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n - predictions: "
                    f"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None

    # Check that `dispatch_batches=False` will work on a finite iterable dataset
    train_dataset = FiniteIterableDataset(label_names=["labels", "extra"], length=1)
    model = RegressionModel()
    training_args.per_device_train_batch_size = 1
    training_args.max_steps = 1
    training_args.accelerator_config.dispatch_batches = False
    trainer = Trainer(model, training_args, train_dataset=train_dataset)
    trainer.train()
transformers/tests/trainer/test_trainer_distributed.py/0
{ "file_path": "transformers/tests/trainer/test_trainer_distributed.py", "repo_id": "transformers", "token_count": 3336 }
600
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import numpy as np
import pytest

from transformers.audio_utils import (
    amplitude_to_db,
    amplitude_to_db_batch,
    chroma_filter_bank,
    hertz_to_mel,
    mel_filter_bank,
    mel_to_hertz,
    power_to_db,
    power_to_db_batch,
    spectrogram,
    spectrogram_batch,
    window_function,
)
from transformers.testing_utils import is_librosa_available, require_librosa


if is_librosa_available():
    from librosa.filters import chroma


class AudioUtilsFunctionTester(unittest.TestCase):
    # will be set in `def _load_datasamples`
    _dataset = None

    def test_hertz_to_mel(self):
        """Check Hz -> mel conversion for the htk (default), slaney and kaldi scales."""
        self.assertEqual(hertz_to_mel(0.0), 0.0)
        self.assertAlmostEqual(hertz_to_mel(100), 150.48910241)

        inputs = np.array([100, 200])
        expected = np.array([150.48910241, 283.22989816])
        self.assertTrue(np.allclose(hertz_to_mel(inputs), expected))

        self.assertEqual(hertz_to_mel(0.0, "slaney"), 0.0)
        self.assertEqual(hertz_to_mel(100, "slaney"), 1.5)

        inputs = np.array([60, 100, 200, 1000, 1001, 2000])
        expected = np.array([0.9, 1.5, 3.0, 15.0, 15.01453781, 25.08188016])
        self.assertTrue(np.allclose(hertz_to_mel(inputs, "slaney"), expected))

        inputs = np.array([60, 100, 200, 1000, 1001, 2000])
        expected = np.array([92.6824, 150.4899, 283.2313, 999.9907, 1000.6534, 1521.3674])
        self.assertTrue(np.allclose(hertz_to_mel(inputs, "kaldi"), expected))

        with pytest.raises(ValueError):
            hertz_to_mel(100, mel_scale=None)

    def test_mel_to_hertz(self):
        """Check mel -> Hz conversion (inverse of `hertz_to_mel`) for all three scales."""
        self.assertEqual(mel_to_hertz(0.0), 0.0)
        self.assertAlmostEqual(mel_to_hertz(150.48910241), 100)

        inputs = np.array([150.48910241, 283.22989816])
        expected = np.array([100, 200])
        self.assertTrue(np.allclose(mel_to_hertz(inputs), expected))

        self.assertEqual(mel_to_hertz(0.0, "slaney"), 0.0)
        self.assertEqual(mel_to_hertz(1.5, "slaney"), 100)

        inputs = np.array([0.9, 1.5, 3.0, 15.0, 15.01453781, 25.08188016])
        expected = np.array([60, 100, 200, 1000, 1001, 2000])
        self.assertTrue(np.allclose(mel_to_hertz(inputs, "slaney"), expected))

        inputs = np.array([92.6824, 150.4899, 283.2313, 999.9907, 1000.6534, 1521.3674])
        expected = np.array([60, 100, 200, 1000, 1001, 2000])
        self.assertTrue(np.allclose(mel_to_hertz(inputs, "kaldi"), expected))

        with pytest.raises(ValueError):
            mel_to_hertz(100, mel_scale=None)

    def test_mel_filter_bank_shape(self):
        """Check the filter bank is always (num_frequency_bins, num_mel_filters)."""
        mel_filters = mel_filter_bank(
            num_frequency_bins=513,
            num_mel_filters=13,
            min_frequency=100,
            max_frequency=4000,
            sampling_rate=16000,
            norm=None,
            mel_scale="htk",
        )
        self.assertEqual(mel_filters.shape, (513, 13))

        mel_filters = mel_filter_bank(
            num_frequency_bins=513,
            num_mel_filters=13,
            min_frequency=100,
            max_frequency=4000,
            sampling_rate=16000,
            norm="slaney",
            mel_scale="slaney",
        )
        self.assertEqual(mel_filters.shape, (513, 13))

        mel_filters = mel_filter_bank(
            num_frequency_bins=513,
            num_mel_filters=13,
            min_frequency=100,
            max_frequency=4000,
            sampling_rate=16000,
            norm="slaney",
            mel_scale="slaney",
            triangularize_in_mel_space=True,
        )
        self.assertEqual(mel_filters.shape, (513, 13))

    def test_mel_filter_bank_htk(self):
        """Check exact filter values for the htk mel scale."""
        mel_filters = mel_filter_bank(
            num_frequency_bins=16,
            num_mel_filters=4,
            min_frequency=0,
            max_frequency=2000,
            sampling_rate=4000,
            norm=None,
            mel_scale="htk",
        )
        # fmt: off
        expected = np.array([
            [0.0       , 0.0       , 0.0       , 0.0       ],
            [0.61454786, 0.0       , 0.0       , 0.0       ],
            [0.82511046, 0.17488954, 0.0       , 0.0       ],
            [0.35597035, 0.64402965, 0.0       , 0.0       ],
            [0.0       , 0.91360726, 0.08639274, 0.0       ],
            [0.0       , 0.55547007, 0.44452993, 0.0       ],
            [0.0       , 0.19733289, 0.80266711, 0.0       ],
            [0.0       , 0.0       , 0.87724349, 0.12275651],
            [0.0       , 0.0       , 0.6038449 , 0.3961551 ],
            [0.0       , 0.0       , 0.33044631, 0.66955369],
            [0.0       , 0.0       , 0.05704771, 0.94295229],
            [0.0       , 0.0       , 0.0       , 0.83483975],
            [0.0       , 0.0       , 0.0       , 0.62612982],
            [0.0       , 0.0       , 0.0       , 0.41741988],
            [0.0       , 0.0       , 0.0       , 0.20870994],
            [0.0       , 0.0       , 0.0       , 0.0       ]
        ])
        # fmt: on
        self.assertTrue(np.allclose(mel_filters, expected))

    def test_mel_filter_bank_slaney(self):
        """Check exact filter values for the slaney mel scale (no normalization)."""
        mel_filters = mel_filter_bank(
            num_frequency_bins=16,
            num_mel_filters=4,
            min_frequency=0,
            max_frequency=2000,
            sampling_rate=4000,
            norm=None,
            mel_scale="slaney",
        )
        # fmt: off
        expected = np.array([
            [0.0       , 0.0       , 0.0       , 0.0       ],
            [0.39869419, 0.0       , 0.0       , 0.0       ],
            [0.79738839, 0.0       , 0.0       , 0.0       ],
            [0.80391742, 0.19608258, 0.0       , 0.0       ],
            [0.40522322, 0.59477678, 0.0       , 0.0       ],
            [0.00652903, 0.99347097, 0.0       , 0.0       ],
            [0.0       , 0.60796161, 0.39203839, 0.0       ],
            [0.0       , 0.20939631, 0.79060369, 0.0       ],
            [0.0       , 0.0       , 0.84685344, 0.15314656],
            [0.0       , 0.0       , 0.52418477, 0.47581523],
            [0.0       , 0.0       , 0.2015161 , 0.7984839 ],
            [0.0       , 0.0       , 0.0       , 0.9141874 ],
            [0.0       , 0.0       , 0.0       , 0.68564055],
            [0.0       , 0.0       , 0.0       , 0.4570937 ],
            [0.0       , 0.0       , 0.0       , 0.22854685],
            [0.0       , 0.0       , 0.0       , 0.0       ]
        ])
        # fmt: on
        self.assertTrue(np.allclose(mel_filters, expected))

    def test_mel_filter_bank_kaldi(self):
        """Check filter values for the kaldi mel scale against torchaudio's reference."""
        mel_filters = mel_filter_bank(
            num_frequency_bins=16,
            num_mel_filters=4,
            min_frequency=0,
            max_frequency=2000,
            sampling_rate=4000,
            norm=None,
            mel_scale="kaldi",
            triangularize_in_mel_space=True,
        )

        # fmt: off
        # here the expected values from torchaudio.compliance.kaldi.get_mel_banks
        # note that we compute values in float64 while they do it in float32
        expected = np.array(
            [
                [0.0000000000000000, 0.0000000000000000, 0.0000000000000000, 0.0000000000000000],
                [0.6457883715629578, 0.0000000000000000, 0.0000000000000000, 0.0000000000000000],
                [0.8044781088829041, 0.1955219060182571, 0.0000000000000000, 0.0000000000000000],
                [0.3258901536464691, 0.6741098165512085, 0.0000000000000000, 0.0000000000000000],
                [0.0000000000000000, 0.9021250009536743, 0.0978749766945839, 0.0000000000000000],
                [0.0000000000000000, 0.5219038724899292, 0.4780961275100708, 0.0000000000000000],
                [0.0000000000000000, 0.1771058291196823, 0.8228941559791565, 0.0000000000000000],
                [0.0000000000000000, 0.0000000000000000, 0.8616894483566284, 0.1383105516433716],
                [0.0000000000000000, 0.0000000000000000, 0.5710380673408508, 0.4289619624614716],
                [0.0000000000000000, 0.0000000000000000, 0.3015440106391907, 0.6984559893608093],
                [0.0000000000000000, 0.0000000000000000, 0.0503356307744980, 0.9496643543243408],
                [0.0000000000000000, 0.0000000000000000, 0.0000000000000000, 0.8150880336761475],
                [0.0000000000000000, 0.0000000000000000, 0.0000000000000000, 0.5938932299613953],
                [0.0000000000000000, 0.0000000000000000, 0.0000000000000000, 0.3851676583290100],
                [0.0000000000000000, 0.0000000000000000, 0.0000000000000000, 0.1875794380903244],
            ],
            dtype=np.float64,
        )
        # fmt: on

        # kaldi implementation does not compute values for last fft bin
        # indeed, they enforce max_frequency <= sampling_rate / 2 and
        # therefore they know that last fft bin filter bank values will be all 0
        # and pad after with zeros
        # to comply with our API for `mel_filter_bank`, we need to also pad here
        expected = np.pad(expected, ((0, 1), (0, 0)))

        self.assertTrue(np.allclose(mel_filters, expected))

    def test_mel_filter_bank_slaney_norm(self):
        """Check exact filter values for the slaney mel scale with slaney normalization."""
        mel_filters = mel_filter_bank(
            num_frequency_bins=16,
            num_mel_filters=4,
            min_frequency=0,
            max_frequency=2000,
            sampling_rate=4000,
            norm="slaney",
            mel_scale="slaney",
        )
        # fmt: off
        expected = np.array([
            [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
            [1.19217795e-03, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
            [2.38435591e-03, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
            [2.40387905e-03, 5.86232616e-04, 0.00000000e+00, 0.00000000e+00],
            [1.21170110e-03, 1.77821783e-03, 0.00000000e+00, 0.00000000e+00],
            [1.95231437e-05, 2.97020305e-03, 0.00000000e+00, 0.00000000e+00],
            [0.00000000e+00, 1.81763684e-03, 1.04857612e-03, 0.00000000e+00],
            [0.00000000e+00, 6.26036972e-04, 2.11460963e-03, 0.00000000e+00],
            [0.00000000e+00, 0.00000000e+00, 2.26505954e-03, 3.07332945e-04],
            [0.00000000e+00, 0.00000000e+00, 1.40202503e-03, 9.54861093e-04],
            [0.00000000e+00, 0.00000000e+00, 5.38990521e-04, 1.60238924e-03],
            [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.83458185e-03],
            [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.37593638e-03],
            [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 9.17290923e-04],
            [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 4.58645462e-04],
            [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00]
        ])
        # fmt: on
        self.assertTrue(np.allclose(mel_filters, expected))

    def test_window_function(self):
        """Check the 16-point Hann window values."""
        window = window_function(16, "hann")
        self.assertEqual(len(window), 16)

        # fmt: off
        expected = np.array([
            0.0, 0.03806023, 0.14644661, 0.30865828, 0.5, 0.69134172, 0.85355339, 0.96193977,
            1.0, 0.96193977, 0.85355339, 0.69134172, 0.5, 0.30865828, 0.14644661, 0.03806023,
        ])
        # fmt: on
        self.assertTrue(np.allclose(window, expected))

    def _load_datasamples(self, num_samples):
        # Lazily load (and cache on the class) the dummy LibriSpeech validation split.
        from datasets import load_dataset

        if self._dataset is None:
            self._dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        speech_samples = self._dataset.sort("id")[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_spectrogram_impulse(self):
        """A single impulse should produce the analytically known Hann response."""
        waveform = np.zeros(40)
        waveform[9] = 1.0  # impulse shifted in time

        spec = spectrogram(
            waveform,
            window_function(12, "hann", frame_length=16),
            frame_length=16,
            hop_length=4,
            power=1.0,
            center=True,
            pad_mode="reflect",
            onesided=True,
        )
        self.assertEqual(spec.shape, (9, 11))

        expected = np.array([[0.0, 0.0669873, 0.9330127, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
        self.assertTrue(np.allclose(spec, expected))

    def test_spectrogram_batch_impulse(self):
        """`spectrogram_batch` should handle variable-length inputs like per-item `spectrogram`."""
        waveform1 = np.zeros(40)
        waveform1[9] = 1.0

        waveform2 = np.zeros(28)
        waveform2[12] = 3.0

        waveform3 = np.zeros(51)
        waveform3[26] = 4.5

        waveform_list = [waveform1, waveform2, waveform3]

        spec_list = spectrogram_batch(
            waveform_list,
            window_function(12, "hann", frame_length=16),
            frame_length=16,
            hop_length=4,
            power=1.0,
            center=True,
            pad_mode="reflect",
            onesided=True,
        )

        self.assertEqual(spec_list[0].shape, (9, 11))
        self.assertEqual(spec_list[1].shape, (9, 8))
        self.assertEqual(spec_list[2].shape, (9, 13))

        expected1 = np.array([[0.0, 0.0669873, 0.9330127, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
        expected2 = np.array([[0.0, 0.0, 0.75, 3.0, 0.75, 0.0, 0.0, 0.0]])
        expected3 = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.375, 3.375, 0.0, 0.0, 0.0, 0.0, 0.0]])

        self.assertTrue(np.allclose(spec_list[0], expected1))
        self.assertTrue(np.allclose(spec_list[1], expected2))
        self.assertTrue(np.allclose(spec_list[2], expected3))

    def test_spectrogram_integration_test(self):
        """Check spectrogram values on real speech, including the kaldi-style log-mel path."""
        waveform = self._load_datasamples(1)[0]

        spec = spectrogram(
            waveform,
            window_function(400, "hann", frame_length=512),
            frame_length=512,
            hop_length=128,
            power=1.0,
            center=True,
            pad_mode="reflect",
            onesided=True,
        )
        self.assertEqual(spec.shape, (257, 732))

        # fmt: off
        expected = np.array([
            0.02464888, 0.04648664, 0.05872392, 0.02311783, 0.0327175 , 0.02433643,
            0.01198814, 0.02055709, 0.01559287, 0.01394357, 0.01299037, 0.01728045,
            0.0254554 , 0.02486533, 0.02011792, 0.01755333, 0.02100457, 0.02337024,
            0.01436963, 0.01464558, 0.0211017 , 0.0193489 , 0.01272165, 0.01858462,
            0.03722598, 0.0456542 , 0.03281558, 0.00620586, 0.02226466, 0.03618042,
            0.03508182, 0.02271432, 0.01051649, 0.01225771, 0.02315293, 0.02331886,
            0.01417785, 0.0106844 , 0.01791214, 0.017177  , 0.02125114, 0.05028201,
            0.06830665, 0.05216664, 0.01963666, 0.06941418, 0.11513043, 0.12257859,
            0.10948435, 0.08568069, 0.05509328, 0.05047818, 0.047112  , 0.05060737,
            0.02982424, 0.02803827, 0.02933729, 0.01760491, 0.00587815, 0.02117637,
            0.0293578 , 0.03452379, 0.02194803, 0.01676056,
        ])
        # fmt: on
        self.assertTrue(np.allclose(spec[:64, 400], expected))

        # Same result when the window is shorter than fft_length (implicit zero-padding).
        spec = spectrogram(
            waveform,
            window_function(400, "hann"),
            frame_length=400,
            hop_length=128,
            fft_length=512,
            power=1.0,
            center=True,
            pad_mode="reflect",
            onesided=True,
        )
        self.assertEqual(spec.shape, (257, 732))
        self.assertTrue(np.allclose(spec[:64, 400], expected))

        mel_filters = mel_filter_bank(
            num_frequency_bins=257,
            num_mel_filters=400,
            min_frequency=20,
            max_frequency=8000,
            sampling_rate=16000,
            norm=None,
            mel_scale="kaldi",
            triangularize_in_mel_space=True,
        )

        spec = spectrogram(
            waveform,
            window_function(400, "povey", periodic=False),
            frame_length=400,
            hop_length=160,
            fft_length=512,
            power=2.0,
            center=False,
            pad_mode="reflect",
            onesided=True,
            preemphasis=0.97,
            mel_filters=mel_filters,
            log_mel="log",
            mel_floor=1.1920928955078125e-07,
            remove_dc_offset=True,
        )
        self.assertEqual(spec.shape, (400, 584))
        # fmt: off
        expected = np.array([
            -15.94238515,  -8.20712299,  -8.22704352, -15.94238515,
            -15.94238515, -15.94238515, -15.94238515, -15.94238515,
             -6.52463769,  -7.73677889, -15.94238515, -15.94238515,
            -15.94238515, -15.94238515,  -4.18650018,  -3.37195286,
            -15.94238515, -15.94238515, -15.94238515, -15.94238515,
             -4.70190154,  -2.4217066 , -15.94238515, -15.94238515,
            -15.94238515, -15.94238515,  -5.62755239,  -3.53385194,
            -15.94238515, -15.94238515, -15.94238515, -15.94238515,
             -9.43303023,  -8.77480925, -15.94238515, -15.94238515,
            -15.94238515, -15.94238515,  -4.2951092 ,  -5.51585994,
            -15.94238515, -15.94238515, -15.94238515,  -4.40151721,
             -3.95228878, -15.94238515, -15.94238515, -15.94238515,
             -6.10365415,  -4.59494697, -15.94238515, -15.94238515,
            -15.94238515,  -8.10727767,  -6.2585298 , -15.94238515,
            -15.94238515, -15.94238515,  -5.60161702,  -4.47217004,
            -15.94238515, -15.94238515, -15.94238515,  -5.91641988,
        ])
        # fmt: on
        self.assertTrue(np.allclose(spec[:64, 400], expected, atol=1e-5))

    def test_spectrogram_batch_integration_test(self):
        waveform_list = self._load_datasamples(3)

        spec_list = spectrogram_batch(
            waveform_list,
            window_function(400, "hann", frame_length=512),
            frame_length=512,
            hop_length=128,
            power=1.0,
            center=True,
            pad_mode="reflect",
            onesided=True,
        )
self.assertEqual(spec_list[0].shape, (257, 732)) self.assertEqual(spec_list[1].shape, (257, 602)) self.assertEqual(spec_list[2].shape, (257, 1561)) # fmt: off expected1 = np.array([ 0.02464888, 0.04648664, 0.05872392, 0.02311783, 0.0327175 , 0.02433643, 0.01198814, 0.02055709, 0.01559287, 0.01394357, 0.01299037, 0.01728045, 0.0254554 , 0.02486533, 0.02011792, 0.01755333, 0.02100457, 0.02337024, 0.01436963, 0.01464558, 0.0211017 , 0.0193489 , 0.01272165, 0.01858462, 0.03722598, 0.0456542 , 0.03281558, 0.00620586, 0.02226466, 0.03618042, 0.03508182, 0.02271432, 0.01051649, 0.01225771, 0.02315293, 0.02331886, 0.01417785, 0.0106844 , 0.01791214, 0.017177 , 0.02125114, 0.05028201, 0.06830665, 0.05216664, 0.01963666, 0.06941418, 0.11513043, 0.12257859, 0.10948435, 0.08568069, 0.05509328, 0.05047818, 0.047112 , 0.05060737, 0.02982424, 0.02803827, 0.02933729, 0.01760491, 0.00587815, 0.02117637, 0.0293578 , 0.03452379, 0.02194803, 0.01676056, ]) expected2 = np.array([ 7.61983171e-02, 1.45338190e-01, 2.63903728e+00, 7.74429535e+00, 9.61932980e+00, 5.40767686e+00, 1.08924884e+00, 3.40908262e+00, 3.59484250e+00, 1.68451077e+00, 5.88405873e-01, 1.17042530e+00, 9.94803324e-01, 3.53757065e-01, 5.47699239e-01, 9.48368581e-01, 7.17770457e-01, 2.09396633e-01, 1.77574463e-01, 2.35644731e-01, 1.31535991e-01, 1.53539552e-02, 4.34416305e-02, 5.32897267e-02, 4.03567305e-02, 1.41842226e-02, 2.90514538e-02, 3.36549485e-02, 1.53516624e-02, 2.37464225e-02, 4.60092464e-02, 4.05769324e-02, 4.82633401e-03, 4.12675364e-02, 7.13859796e-02, 6.16866566e-02, 2.55657822e-02, 1.68923281e-02, 1.91299946e-02, 1.60033798e-02, 1.33405095e-02, 1.52065457e-02, 1.21833352e-02, 2.25786382e-03, 6.15358376e-03, 1.07647616e-02, 1.23051018e-02, 6.75289378e-03, 2.71127435e-03, 1.06515263e-02, 1.18463583e-02, 7.14347935e-03, 1.87912782e-03, 4.44236027e-03, 5.19630243e-03, 2.46666998e-03, 1.01598645e-03, 1.21589237e-03, 1.29095500e-03, 1.07447628e-03, 1.40218156e-03, 3.65402623e-03, 4.00592755e-03, 4.20001841e-03 ]) 
expected3 = np.array([ 0.07805249, 0.34305022, 0.55617084, 1.22475182, 1.17040678, 0.51540532, 0.23570016, 0.06630775, 0.09017777, 0.07693192, 0.0333643 , 0.04873054, 0.04668559, 0.02384041, 0.02780435, 0.0289717 , 0.01704903, 0.0201644 , 0.01700376, 0.02176975, 0.02042491, 0.00732129, 0.00326042, 0.00245065, 0.00510645, 0.00681892, 0.00739329, 0.00551437, 0.0070674 , 0.00630015, 0.00379566, 0.0060098 , 0.00311543, 0.00902284, 0.01171038, 0.01202166, 0.01759194, 0.01652899, 0.01201872, 0.01295351, 0.00756432, 0.01415318, 0.02349972, 0.02296833, 0.02429341, 0.02447459, 0.01835044, 0.01437871, 0.02262246, 0.02972324, 0.03392252, 0.03037546, 0.01116927, 0.01555062, 0.02833379, 0.02294212, 0.02069847, 0.02496927, 0.02273526, 0.01341643, 0.00805407, 0.00624943, 0.01076262, 0.01876003 ]) # fmt: on self.assertTrue(np.allclose(spec_list[0][:64, 400], expected1)) self.assertTrue(np.allclose(spec_list[1][:64, 400], expected2)) self.assertTrue(np.allclose(spec_list[2][:64, 400], expected3)) spec_list = spectrogram_batch( waveform_list, window_function(400, "hann"), frame_length=400, hop_length=128, fft_length=512, power=1.0, center=True, pad_mode="reflect", onesided=True, ) self.assertEqual(spec_list[0].shape, (257, 732)) self.assertEqual(spec_list[1].shape, (257, 602)) self.assertEqual(spec_list[2].shape, (257, 1561)) self.assertTrue(np.allclose(spec_list[0][:64, 400], expected1)) self.assertTrue(np.allclose(spec_list[1][:64, 400], expected2)) self.assertTrue(np.allclose(spec_list[2][:64, 400], expected3)) mel_filters = mel_filter_bank( num_frequency_bins=257, num_mel_filters=400, min_frequency=20, max_frequency=8000, sampling_rate=16000, norm=None, mel_scale="kaldi", triangularize_in_mel_space=True, ) spec_list = spectrogram_batch( waveform_list, window_function(400, "povey", periodic=False), frame_length=400, hop_length=160, fft_length=512, power=2.0, center=False, pad_mode="reflect", onesided=True, preemphasis=0.97, mel_filters=mel_filters, log_mel="log", 
mel_floor=1.1920928955078125e-07, remove_dc_offset=True, ) self.assertEqual(spec_list[0].shape, (400, 584)) self.assertEqual(spec_list[1].shape, (400, 480)) self.assertEqual(spec_list[2].shape, (400, 1247)) # fmt: off expected1 = np.array([-15.94238515, -8.20712299, -8.22704352, -15.94238515, -15.94238515, -15.94238515, -15.94238515, -15.94238515, -6.52463769, -7.73677889, -15.94238515, -15.94238515, -15.94238515, -15.94238515, -4.18650018, -3.37195286, -15.94238515, -15.94238515, -15.94238515, -15.94238515, -4.70190154, -2.4217066 , -15.94238515, -15.94238515, -15.94238515, -15.94238515, -5.62755239, -3.53385194, -15.94238515, -15.94238515, -15.94238515, -15.94238515, -9.43303023, -8.77480925, -15.94238515, -15.94238515, -15.94238515, -15.94238515, -4.2951092 , -5.51585994, -15.94238515, -15.94238515, -15.94238515, -4.40151721, -3.95228878, -15.94238515, -15.94238515, -15.94238515, -6.10365415, -4.59494697, -15.94238515, -15.94238515, -15.94238515, -8.10727767, -6.2585298 , -15.94238515, -15.94238515, -15.94238515, -5.60161702, -4.47217004, -15.94238515, -15.94238515, -15.94238515, -5.91641988] ) expected2 = np.array([-15.942385, -8.531508, -8.551396, -15.942385, -15.942385, -15.942385, -15.942385, -15.942385, -5.626043, -6.8381968, -15.942385, -15.942385, -15.942385, -15.942385, -3.3122184, -2.49764, -15.942385, -15.942385, -15.942385, -15.942385, -3.625868, -1.3457257, -15.942385, -15.942385, -15.942385, -15.942385, -4.2223063, -2.1285915, -15.942385, -15.942385, -15.942385, -15.942385, -8.611152, -7.952894, -15.942385, -15.942385, -15.942385, -15.942385, -2.7585578, -3.9793255, -15.942385, -15.942385, -15.942385, -2.5377562, -2.0885658, -15.942385, -15.942385, -15.942385, -3.8310733, -2.322393, -15.942385, -15.942385, -15.942385, -7.674944, -5.8261633, -15.942385, -15.942385, -15.942385, -3.5960004, -2.4665844, -15.942385, -15.942385, -15.942385, -1.7905309] ) expected3 = np.array([-15.942385, -13.406995, -13.426883, -15.942385, -15.942385, -15.942385, 
-15.942385, -15.942385, -15.942385, -15.942385, -15.942385, -15.942385, -15.942385, -15.942385, -13.493383, -12.678805, -15.942385, -15.942385, -15.942385, -15.942385, -14.809377, -12.529235, -15.942385, -15.942385, -15.942385, -15.942385, -13.838827, -11.745112, -15.942385, -15.942385, -15.942385, -15.942385, -13.9336405, -13.275384, -15.942385, -15.942385, -15.942385, -15.942385, -13.043786, -14.264554, -15.942385, -15.942385, -15.942385, -13.060181, -12.610991, -15.942385, -15.942385, -15.942385, -14.152064, -12.643384, -15.942385, -15.942385, -15.942385, -14.48317, -12.634389, -15.942385, -15.942385, -15.942385, -14.627316, -13.4979, -15.942385, -15.942385, -15.942385, -12.6279955] ) # fmt: on self.assertTrue(np.allclose(spec_list[0][:64, 400], expected1, atol=1e-5)) self.assertTrue(np.allclose(spec_list[1][:64, 400], expected2, atol=1e-5)) self.assertTrue(np.allclose(spec_list[2][:64, 400], expected3, atol=1e-5)) def test_spectrogram_center_padding(self): waveform = self._load_datasamples(1)[0] spec = spectrogram( waveform, window_function(512, "hann"), frame_length=512, hop_length=128, center=True, pad_mode="reflect", ) self.assertEqual(spec.shape, (257, 732)) # fmt: off expected = np.array([ 0.1287945 , 0.12792738, 0.08311573, 0.03155122, 0.02470202, 0.00727857, 0.00910694, 0.00686163, 0.01238981, 0.01473668, 0.00336144, 0.00370314, 0.00600871, 0.01120164, 0.01942998, 0.03132008, 0.0232842 , 0.01124642, 0.02754783, 0.02423725, 0.00147893, 0.00038027, 0.00112299, 0.00596233, 0.00571529, 0.02084235, 0.0231855 , 0.00810006, 0.01837943, 0.00651339, 0.00093931, 0.00067426, 0.01058399, 0.01270507, 0.00151734, 0.00331913, 0.00302416, 0.01081792, 0.00754549, 0.00148963, 0.00111943, 0.00152573, 0.00608017, 0.01749986, 0.01205949, 0.0143082 , 0.01910573, 0.00413786, 0.03916619, 0.09873404, 0.08302026, 0.02673891, 0.00401255, 0.01397392, 0.00751862, 0.01024884, 0.01544606, 0.00638907, 0.00623633, 0.0085103 , 0.00217659, 0.00276204, 0.00260835, 0.00299299, ]) # fmt: on 
self.assertTrue(np.allclose(spec[:64, 0], expected)) spec = spectrogram( waveform, window_function(512, "hann"), frame_length=512, hop_length=128, center=True, pad_mode="constant", ) self.assertEqual(spec.shape, (257, 732)) # fmt: off expected = np.array([ 0.06558744, 0.06889656, 0.06263352, 0.04264418, 0.03404115, 0.03244197, 0.02279134, 0.01646339, 0.01452216, 0.00826055, 0.00062093, 0.0031821 , 0.00419456, 0.00689327, 0.01106367, 0.01712119, 0.01721762, 0.00977533, 0.01606626, 0.02275621, 0.01727687, 0.00992739, 0.01217688, 0.01049927, 0.01022947, 0.01302475, 0.01166873, 0.01081812, 0.01057327, 0.00767912, 0.00429567, 0.00089625, 0.00654583, 0.00912084, 0.00700984, 0.00225026, 0.00290545, 0.00667712, 0.00730663, 0.00410813, 0.00073102, 0.00219296, 0.00527618, 0.00996585, 0.01123781, 0.00872816, 0.01165121, 0.02047945, 0.03681747, 0.0514379 , 0.05137928, 0.03960042, 0.02821562, 0.01813349, 0.01201322, 0.01260964, 0.00900654, 0.00207905, 0.00456714, 0.00850599, 0.00788239, 0.00664407, 0.00824227, 0.00628301, ]) # fmt: on self.assertTrue(np.allclose(spec[:64, 0], expected)) spec = spectrogram( waveform, window_function(512, "hann"), frame_length=512, hop_length=128, center=False, ) self.assertEqual(spec.shape, (257, 728)) # fmt: off expected = np.array([ 0.00250445, 0.02161521, 0.06232229, 0.04339567, 0.00937727, 0.01080616, 0.00248685, 0.0095264 , 0.00727476, 0.0079152 , 0.00839946, 0.00254932, 0.00716622, 0.005559 , 0.00272623, 0.00581774, 0.01896395, 0.01829788, 0.01020514, 0.01632692, 0.00870888, 0.02065827, 0.0136022 , 0.0132382 , 0.011827 , 0.00194505, 0.0189979 , 0.026874 , 0.02194014, 0.01923883, 0.01621437, 0.00661967, 0.00289517, 0.00470257, 0.00957801, 0.00191455, 0.00431664, 0.00544359, 0.01126213, 0.00785778, 0.00423469, 0.01322504, 0.02226548, 0.02318576, 0.03428908, 0.03648811, 0.0202938 , 0.011902 , 0.03226198, 0.06347476, 0.01306318, 0.05308729, 0.05474771, 0.03127991, 0.00998512, 0.01449977, 0.01272741, 0.00868176, 0.00850386, 0.00313876, 
0.00811857, 0.00538216, 0.00685749, 0.00535275, ]) # fmt: on self.assertTrue(np.allclose(spec[:64, 0], expected)) def test_spectrogram_batch_center_padding(self): waveform_list = self._load_datasamples(3) spec_list = spectrogram_batch( waveform_list, window_function(512, "hann"), frame_length=512, hop_length=128, center=True, pad_mode="reflect", ) self.assertEqual(spec_list[0].shape, (257, 732)) self.assertEqual(spec_list[1].shape, (257, 602)) self.assertEqual(spec_list[2].shape, (257, 1561)) # fmt: off expected1 = np.array([ 0.1287945 , 0.12792738, 0.08311573, 0.03155122, 0.02470202, 0.00727857, 0.00910694, 0.00686163, 0.01238981, 0.01473668, 0.00336144, 0.00370314, 0.00600871, 0.01120164, 0.01942998, 0.03132008, 0.0232842 , 0.01124642, 0.02754783, 0.02423725, 0.00147893, 0.00038027, 0.00112299, 0.00596233, 0.00571529, 0.02084235, 0.0231855 , 0.00810006, 0.01837943, 0.00651339, 0.00093931, 0.00067426, 0.01058399, 0.01270507, 0.00151734, 0.00331913, 0.00302416, 0.01081792, 0.00754549, 0.00148963, 0.00111943, 0.00152573, 0.00608017, 0.01749986, 0.01205949, 0.0143082 , 0.01910573, 0.00413786, 0.03916619, 0.09873404, 0.08302026, 0.02673891, 0.00401255, 0.01397392, 0.00751862, 0.01024884, 0.01544606, 0.00638907, 0.00623633, 0.0085103 , 0.00217659, 0.00276204, 0.00260835, 0.00299299, ]) expected2 = np.array([ 1.89624839e-02, 1.23274978e-02, 3.69160250e-02, 4.76267971e-02, 1.39258439e-02, 2.98370440e-02, 2.74845166e-03, 3.01934010e-03, 1.18722776e-02, 9.70834121e-03, 2.06300567e-04, 6.32975250e-04, 8.20603687e-03, 1.21864351e-02, 3.28791840e-03, 3.36801982e-04, 2.79373326e-03, 5.00530424e-03, 8.46884679e-03, 1.14089288e-02, 8.59052036e-03, 2.88538425e-03, 9.95071139e-03, 6.80431770e-03, 2.95809377e-03, 1.46285209e-04, 3.36268265e-03, 4.80051298e-04, 2.84506916e-03, 9.34222655e-04, 3.42161348e-03, 2.79612141e-03, 3.38875921e-03, 2.85030343e-03, 5.39513239e-05, 2.72908504e-03, 2.09591188e-03, 5.00271388e-04, 8.31917219e-04, 2.37967237e-03, 1.75001193e-03, 1.31826295e-04, 
8.83622793e-04, 1.54303256e-04, 3.09544569e-03, 4.08527814e-03, 2.73566321e-03, 1.78805250e-03, 9.53314066e-06, 1.74316950e-03, 1.51099428e-03, 8.65990878e-04, 8.44859460e-04, 5.35220199e-04, 5.36562002e-04, 8.33181897e-04, 8.22705682e-04, 1.81083288e-03, 9.75003233e-04, 6.73114730e-04, 6.81665202e-04, 2.05180887e-03, 1.10151991e-03, 4.75923851e-04, ]) expected3 = np.array([ 0.07079848, 0.04237922, 0.0220724, 0.04446052, 0.03598337, 0.03327273, 0.02545774, 0.01319528, 0.00919659, 0.01376867, 0.00361992, 0.00608425, 0.01105873, 0.0105565, 0.00744286, 0.00244849, 0.00257317, 0.00749989, 0.01061386, 0.01525312, 0.00656914, 0.01199581, 0.00487319, 0.00830956, 0.0046706, 0.00588962, 0.00544486, 0.00565179, 0.00050112, 0.01108059, 0.00217417, 0.00453234, 0.00537306, 0.00269329, 0.00342333, 0.00095484, 0.00708934, 0.00660373, 0.00543686, 0.00217186, 0.00431519, 0.00457764, 0.00503529, 0.01166454, 0.01375581, 0.01467224, 0.00873404, 0.00534086, 0.00476848, 0.0226163, 0.0314, 0.00151021, 0.01975221, 0.01637519, 0.00046068, 0.0460544, 0.06285986, 0.03151625, 0.0013598, 0.004804, 0.0073824, 0.02312599, 0.02613977, 0.01056851 ]) # fmt: on self.assertTrue(np.allclose(spec_list[0][:64, 0], expected1)) self.assertTrue(np.allclose(spec_list[1][:64, 0], expected2)) self.assertTrue(np.allclose(spec_list[2][:64, 0], expected3)) spec_list = spectrogram_batch( waveform_list, window_function(512, "hann"), frame_length=512, hop_length=128, center=True, pad_mode="constant", ) self.assertEqual(spec_list[0].shape, (257, 732)) self.assertEqual(spec_list[1].shape, (257, 602)) self.assertEqual(spec_list[2].shape, (257, 1561)) # fmt: off expected1 = np.array([ 0.06558744, 0.06889656, 0.06263352, 0.04264418, 0.03404115, 0.03244197, 0.02279134, 0.01646339, 0.01452216, 0.00826055, 0.00062093, 0.0031821 , 0.00419456, 0.00689327, 0.01106367, 0.01712119, 0.01721762, 0.00977533, 0.01606626, 0.02275621, 0.01727687, 0.00992739, 0.01217688, 0.01049927, 0.01022947, 0.01302475, 0.01166873, 0.01081812, 
0.01057327, 0.00767912, 0.00429567, 0.00089625, 0.00654583, 0.00912084, 0.00700984, 0.00225026, 0.00290545, 0.00667712, 0.00730663, 0.00410813, 0.00073102, 0.00219296, 0.00527618, 0.00996585, 0.01123781, 0.00872816, 0.01165121, 0.02047945, 0.03681747, 0.0514379 , 0.05137928, 0.03960042, 0.02821562, 0.01813349, 0.01201322, 0.01260964, 0.00900654, 0.00207905, 0.00456714, 0.00850599, 0.00788239, 0.00664407, 0.00824227, 0.00628301, ]) expected2 = np.array([ 0.00955754, 0.01445548, 0.02393902, 0.02903068, 0.02512844, 0.01508297, 0.00474784, 0.00440362, 0.0073898, 0.00546519, 0.00126077, 0.00240507, 0.00523254, 0.00632742, 0.00415215, 0.00056628, 0.00161288, 0.0026956, 0.00431587, 0.00621471, 0.00791291, 0.0079454, 0.00594525, 0.00334581, 0.00180047, 0.00144485, 0.00175764, 0.00188037, 0.00134889, 0.00150253, 0.00178821, 0.00158875, 0.00204339, 0.00266497, 0.00280556, 0.00221949, 0.00108956, 0.000532, 0.00108454, 0.00129254, 0.00089315, 0.00022803, 0.00038176, 0.0011302, 0.00189306, 0.0021964, 0.00203576, 0.00207306, 0.00217727, 0.00174297, 0.00103331, 0.00076695, 0.0007422, 0.00061986, 0.00081204, 0.00079615, 0.00089417, 0.00105452, 0.00042615, 0.00066372, 0.00132765, 0.00122087, 0.00054903, 0.00107945, ]) expected3 = np.array([ 0.03573493, 0.03625983, 0.03341755, 0.02431477, 0.01770546, 0.0169356 , 0.01579034, 0.01600499, 0.01329064, 0.00747957, 0.00367372, 0.00403853, 0.00519597, 0.00551022, 0.00532757, 0.00367569, 0.00130341, 0.00345149, 0.00520744, 0.00872308, 0.01172503, 0.00948154, 0.00344236, 0.00387997, 0.00425455, 0.00394357, 0.00711733, 0.00615654, 0.00055756, 0.00656414, 0.00852001, 0.00666252, 0.00509767, 0.00246784, 0.00376049, 0.00682879, 0.00641118, 0.00469685, 0.00358701, 0.0015552 , 0.00261458, 0.00701979, 0.00929578, 0.00894536, 0.00828491, 0.00773528, 0.00552091, 0.00259871, 0.00933179, 0.01588626, 0.01697887, 0.01268552, 0.00957255, 0.01204092, 0.02123362, 0.03062669, 0.03215763, 0.02629963, 0.01769568, 0.01088869, 0.01151334, 0.01378197, 0.01319263, 
0.01066859, ]) # fmt: on self.assertTrue(np.allclose(spec_list[0][:64, 0], expected1)) self.assertTrue(np.allclose(spec_list[1][:64, 0], expected2)) self.assertTrue(np.allclose(spec_list[2][:64, 0], expected3)) spec_list = spectrogram_batch( waveform_list, window_function(512, "hann"), frame_length=512, hop_length=128, center=False, ) self.assertEqual(spec_list[0].shape, (257, 728)) self.assertEqual(spec_list[1].shape, (257, 598)) self.assertEqual(spec_list[2].shape, (257, 1557)) # fmt: off expected1 = np.array([ 0.00250445, 0.02161521, 0.06232229, 0.04339567, 0.00937727, 0.01080616, 0.00248685, 0.0095264 , 0.00727476, 0.0079152 , 0.00839946, 0.00254932, 0.00716622, 0.005559 , 0.00272623, 0.00581774, 0.01896395, 0.01829788, 0.01020514, 0.01632692, 0.00870888, 0.02065827, 0.0136022 , 0.0132382 , 0.011827 , 0.00194505, 0.0189979 , 0.026874 , 0.02194014, 0.01923883, 0.01621437, 0.00661967, 0.00289517, 0.00470257, 0.00957801, 0.00191455, 0.00431664, 0.00544359, 0.01126213, 0.00785778, 0.00423469, 0.01322504, 0.02226548, 0.02318576, 0.03428908, 0.03648811, 0.0202938 , 0.011902 , 0.03226198, 0.06347476, 0.01306318, 0.05308729, 0.05474771, 0.03127991, 0.00998512, 0.01449977, 0.01272741, 0.00868176, 0.00850386, 0.00313876, 0.00811857, 0.00538216, 0.00685749, 0.00535275, ]) expected2 = np.array([ 0.01232908, 0.05980514, 0.08285419, 0.01850723, 0.02823627, 0.00204369, 0.01372626, 0.00956435, 0.02267217, 0.00947112, 0.00355174, 0.00418008, 0.00843608, 0.01559252, 0.01125505, 0.00183573, 0.00765051, 0.0109983 , 0.00890545, 0.00583453, 0.00115901, 0.00579039, 0.00151353, 0.00395812, 0.00231413, 0.00384272, 0.00313914, 0.00072331, 0.00338935, 0.00383328, 0.00218129, 0.00284516, 0.00228538, 0.00083603, 0.00111663, 0.00235799, 0.00142748, 0.00092908, 0.0012966 , 0.0011403 , 0.0010619 , 0.00158732, 0.00289866, 0.00216709, 0.00313325, 0.00361277, 0.00202507, 0.0009948 , 0.00114428, 0.00200851, 0.0009234 , 0.00063468, 0.00018746, 0.00100463, 0.00053799, 0.00080009, 0.00158291, 
0.00172077, 0.00173586, 0.00197127, 0.00107058, 0.00043486,
            0.0009859 , 0.00215484,
        ])
        expected3 = np.array([
            0.01864123, 0.06131337, 0.08346292, 0.04936386, 0.02792609,
            0.01005205, 0.00884826, 0.02198604, 0.02421535, 0.00957573,
            0.00503561, 0.00241331, 0.00175652, 0.00195889, 0.00453299,
            0.0020317 , 0.00249264, 0.00517483, 0.01111943, 0.0150079 ,
            0.01977743, 0.01253825, 0.00517561, 0.01031712, 0.00579466,
            0.00783679, 0.0071415 , 0.00591847, 0.01510728, 0.01194921,
            0.00518072, 0.00125978, 0.00577552, 0.01050614, 0.0077644 ,
            0.0042905 , 0.00278469, 0.00166695, 0.00255013, 0.00578153,
            0.00586451, 0.00929514, 0.01501226, 0.00741419, 0.00310625,
            0.00086757, 0.00595618, 0.0053882 , 0.0116266 , 0.02504773,
            0.02889692, 0.03739442, 0.04730207, 0.03856638, 0.05700104,
            0.04299267, 0.02153366, 0.03740607, 0.03811468, 0.01575022,
            0.00676344, 0.01359865, 0.01769319, 0.00907966,
        ])
        # fmt: on
        self.assertTrue(np.allclose(spec_list[0][:64, 0], expected1))
        self.assertTrue(np.allclose(spec_list[1][:64, 0], expected2))
        self.assertTrue(np.allclose(spec_list[2][:64, 0], expected3))

    def test_spectrogram_shapes(self):
        """Check `spectrogram` output shapes for various framing options."""
        waveform = self._load_datasamples(1)[0]

        # onesided=True: frame_length // 2 + 1 = 201 frequency bins.
        spec = spectrogram(
            waveform,
            window_function(400, "hann"),
            frame_length=400,
            hop_length=128,
            power=1.0,
            center=True,
            pad_mode="reflect",
            onesided=True,
        )
        self.assertEqual(spec.shape, (201, 732))

        # center=False yields fewer frames (732 -> 729) for the same input.
        spec = spectrogram(
            waveform,
            window_function(400, "hann"),
            frame_length=400,
            hop_length=128,
            power=1.0,
            center=False,
            pad_mode="reflect",
            onesided=True,
        )
        self.assertEqual(spec.shape, (201, 729))

        # Explicit fft_length=512 -> 512 // 2 + 1 = 257 bins.
        spec = spectrogram(
            waveform,
            window_function(400, "hann"),
            frame_length=400,
            hop_length=128,
            fft_length=512,
            power=1.0,
            center=True,
            pad_mode="reflect",
            onesided=True,
        )
        self.assertEqual(spec.shape, (257, 732))

        # onesided=False keeps all 512 bins; hop_length=64 doubles the frames.
        spec = spectrogram(
            waveform,
            window_function(400, "hann", frame_length=512),
            frame_length=512,
            hop_length=64,
            power=1.0,
            center=True,
            pad_mode="reflect",
            onesided=False,
        )
        self.assertEqual(spec.shape, (512, 1464))

        spec = spectrogram(
waveform,
            window_function(512, "hann"),
            frame_length=512,
            hop_length=64,
            power=1.0,
            center=True,
            pad_mode="reflect",
            onesided=False,
        )
        self.assertEqual(spec.shape, (512, 1464))

        # hop_length equal to the frame length -> far fewer frames.
        spec = spectrogram(
            waveform,
            window_function(512, "hann"),
            frame_length=512,
            hop_length=512,
            power=1.0,
            center=True,
            pad_mode="reflect",
            onesided=False,
        )
        self.assertEqual(spec.shape, (512, 183))

    def test_spectrogram_batch_shapes(self):
        """Check `spectrogram_batch` shapes mirror the single-input case.

        Same framing options as `test_spectrogram_shapes`, applied to three
        waveforms of different lengths: the frequency-bin count matches the
        single-waveform test while frame counts vary per waveform.
        """
        waveform_list = self._load_datasamples(3)

        spec_list = spectrogram_batch(
            waveform_list,
            window_function(400, "hann"),
            frame_length=400,
            hop_length=128,
            power=1.0,
            center=True,
            pad_mode="reflect",
            onesided=True,
        )
        self.assertEqual(spec_list[0].shape, (201, 732))
        self.assertEqual(spec_list[1].shape, (201, 602))
        self.assertEqual(spec_list[2].shape, (201, 1561))

        # center=False: three fewer frames for every waveform.
        spec_list = spectrogram_batch(
            waveform_list,
            window_function(400, "hann"),
            frame_length=400,
            hop_length=128,
            power=1.0,
            center=False,
            pad_mode="reflect",
            onesided=True,
        )
        self.assertEqual(spec_list[0].shape, (201, 729))
        self.assertEqual(spec_list[1].shape, (201, 599))
        self.assertEqual(spec_list[2].shape, (201, 1558))

        # fft_length=512 -> 257 bins.
        spec_list = spectrogram_batch(
            waveform_list,
            window_function(400, "hann"),
            frame_length=400,
            hop_length=128,
            fft_length=512,
            power=1.0,
            center=True,
            pad_mode="reflect",
            onesided=True,
        )
        self.assertEqual(spec_list[0].shape, (257, 732))
        self.assertEqual(spec_list[1].shape, (257, 602))
        self.assertEqual(spec_list[2].shape, (257, 1561))

        # onesided=False keeps all 512 bins; hop_length=64 doubles the frames.
        spec_list = spectrogram_batch(
            waveform_list,
            window_function(400, "hann", frame_length=512),
            frame_length=512,
            hop_length=64,
            power=1.0,
            center=True,
            pad_mode="reflect",
            onesided=False,
        )
        self.assertEqual(spec_list[0].shape, (512, 1464))
        self.assertEqual(spec_list[1].shape, (512, 1204))
        self.assertEqual(spec_list[2].shape, (512, 3122))

        # A full 512-sample window gives the same shapes as the padded one.
        spec_list = spectrogram_batch(
            waveform_list,
            window_function(512, "hann"),
            frame_length=512,
            hop_length=64,
            power=1.0,
            center=True,
            pad_mode="reflect",
            onesided=False,
        )
        self.assertEqual(spec_list[0].shape, (512, 1464))
        self.assertEqual(spec_list[1].shape, (512, 1204))
        self.assertEqual(spec_list[2].shape, (512, 3122))

        # hop_length equal to the frame length -> far fewer frames.
        spec_list = spectrogram_batch(
            waveform_list,
            window_function(512, "hann"),
            frame_length=512,
            hop_length=512,
            power=1.0,
            center=True,
            pad_mode="reflect",
            onesided=False,
        )
        self.assertEqual(spec_list[0].shape, (512, 183))
        self.assertEqual(spec_list[1].shape, (512, 151))
        self.assertEqual(spec_list[2].shape, (512, 391))

    def test_mel_spectrogram(self):
        """Apply a 13-filter HTK mel filter bank to a power spectrogram."""
        waveform = self._load_datasamples(1)[0]

        mel_filters = mel_filter_bank(
            num_frequency_bins=513,
            num_mel_filters=13,
            min_frequency=100,
            max_frequency=4000,
            sampling_rate=16000,
            norm=None,
            mel_scale="htk",
        )
        self.assertEqual(mel_filters.shape, (513, 13))

        # Without mel_filters: raw power spectrogram with 513 frequency bins.
        spec = spectrogram(
            waveform,
            window_function(800, "hann", frame_length=1024),
            frame_length=1024,
            hop_length=128,
            power=2.0,
        )
        self.assertEqual(spec.shape, (513, 732))

        # With mel_filters: the 513 bins are projected down to 13 mel bands.
        spec = spectrogram(
            waveform,
            window_function(800, "hann", frame_length=1024),
            frame_length=1024,
            hop_length=128,
            power=2.0,
            mel_filters=mel_filters,
        )
        self.assertEqual(spec.shape, (13, 732))

        # Regression values for frame 300 across all 13 mel bands.
        # fmt: off
        expected = np.array([
            1.08027889e+02, 1.48080673e+01, 7.70758213e+00, 9.57676639e-01,
            8.81639061e-02, 5.26073833e-02, 1.52736155e-02, 9.95350117e-03,
            7.95364356e-03, 1.01148004e-02, 4.29241020e-03, 9.90708797e-03,
            9.44153646e-04
        ])
        # fmt: on
        self.assertTrue(np.allclose(spec[:, 300], expected))

    def test_mel_spectrogram_batch(self):
        """Batched counterpart of `test_mel_spectrogram` on three waveforms."""
        waveform_list = self._load_datasamples(3)

        mel_filters = mel_filter_bank(
            num_frequency_bins=513,
            num_mel_filters=13,
            min_frequency=100,
            max_frequency=4000,
            sampling_rate=16000,
            norm=None,
            mel_scale="htk",
        )
        self.assertEqual(mel_filters.shape, (513, 13))

        spec_list = spectrogram_batch(
            waveform_list,
            window_function(800, "hann", frame_length=1024),
            frame_length=1024,
            hop_length=128,
            power=2.0,
        )
        self.assertEqual(spec_list[0].shape, (513, 732))
        self.assertEqual(spec_list[1].shape, (513, 602))
        self.assertEqual(spec_list[2].shape, (513, 1561))

        spec_list = spectrogram_batch(
            waveform_list,
window_function(800, "hann", frame_length=1024), frame_length=1024, hop_length=128, power=2.0, mel_filters=mel_filters, ) self.assertEqual(spec_list[0].shape, (13, 732)) self.assertEqual(spec_list[1].shape, (13, 602)) self.assertEqual(spec_list[2].shape, (13, 1561)) # fmt: off expected1 = np.array([ 1.08027889e+02, 1.48080673e+01, 7.70758213e+00, 9.57676639e-01, 8.81639061e-02, 5.26073833e-02, 1.52736155e-02, 9.95350117e-03, 7.95364356e-03, 1.01148004e-02, 4.29241020e-03, 9.90708797e-03, 9.44153646e-04 ]) expected2 = np.array([ 71.82577165, 109.44693334, 272.4834194, 164.90450355, 16.54056349, 11.60810547, 24.87525946, 21.07317022, 1.26736284, 1.4583074, 1.36659061, 1.76305768, 2.03703503 ]) expected3 = np.array([ 5.22246749e+02, 6.92660728e+02, 2.65895922e+02, 2.06526565e+01, 2.28692104e+00, 1.19473622e+00, 8.43228216e-01, 3.20760592e+00, 1.33654151e+00, 1.51050684e-01, 2.78282477e-01, 9.25020981e-01, 2.29908841e-01 ]) # fmt: on self.assertTrue(np.allclose(spec_list[0][:, 300], expected1)) self.assertTrue(np.allclose(spec_list[1][:, 300], expected2)) self.assertTrue(np.allclose(spec_list[2][:, 300], expected3)) def test_spectrogram_power(self): waveform = self._load_datasamples(1)[0] spec = spectrogram( waveform, window_function(400, "hann", frame_length=512), frame_length=512, hop_length=128, power=None, ) self.assertEqual(spec.shape, (257, 732)) self.assertEqual(spec.dtype, np.complex64) # fmt: off expected = np.array([ 0.01452305+0.01820039j, -0.01737362-0.01641946j, 0.0121028 +0.01565081j, -0.02794554-0.03021514j, 0.04719803+0.04086519j, -0.04391563-0.02779365j, 0.05682834+0.01571325j, -0.08604821-0.02023657j, 0.07497991+0.0186641j , -0.06366091-0.00922475j, 0.11003416+0.0114788j , -0.13677941-0.01523552j, 0.10934535-0.00117226j, -0.11635598+0.02551187j, 0.14708674-0.03469823j, -0.1328196 +0.06034218j, 0.12667368-0.13973421j, -0.14764774+0.18912019j, 0.10235471-0.12181523j, -0.00773012+0.04730498j, -0.01487191-0.07312611j, -0.02739162+0.09619419j, 
0.02895459-0.05398273j, 0.01198589+0.05276592j, -0.02117299-0.10123465j, 0.00666388+0.09526499j, -0.01672773-0.05649684j, 0.02723125+0.05939891j, -0.01879361-0.062954j , 0.03686557+0.04568823j, -0.07394181-0.07949649j, 0.06238583+0.13905765j, ]) # fmt: on self.assertTrue(np.allclose(spec[64:96, 321], expected)) spec = spectrogram( waveform, window_function(400, "hann", frame_length=512), frame_length=512, hop_length=128, power=1.0, ) self.assertEqual(spec.shape, (257, 732)) self.assertEqual(spec.dtype, np.float64) # fmt: off expected = np.array([ 0.02328461, 0.02390484, 0.01978448, 0.04115711, 0.0624309 , 0.05197181, 0.05896072, 0.08839577, 0.07726794, 0.06432579, 0.11063128, 0.13762532, 0.10935163, 0.11911998, 0.15112405, 0.14588428, 0.18860507, 0.23992978, 0.15910825, 0.04793241, 0.07462307, 0.10001811, 0.06125769, 0.05411011, 0.10342509, 0.09549777, 0.05892122, 0.06534349, 0.06569936, 0.05870678, 0.10856833, 0.1524107 , 0.11463385, 0.05766969, 0.12385171, 0.14472842, 0.11978184, 0.10353675, 0.07244056, 0.03461861, 0.02624896, 0.02227475, 0.01238363, 0.00885281, 0.0110049 , 0.00807005, 0.01033663, 0.01703181, 0.01445856, 0.00585615, 0.0132431 , 0.02754132, 0.01524478, 0.0204908 , 0.07453328, 0.10716327, 0.07195779, 0.08816078, 0.18340898, 0.16449876, 0.12322842, 0.1621659 , 0.12334293, 0.06033659, ]) # fmt: on self.assertTrue(np.allclose(spec[64:128, 321], expected)) spec = spectrogram( waveform, window_function(400, "hann", frame_length=512), frame_length=512, hop_length=128, power=2.0, ) self.assertEqual(spec.shape, (257, 732)) self.assertEqual(spec.dtype, np.float64) # fmt: off expected = np.array([ 5.42173162e-04, 5.71441371e-04, 3.91425507e-04, 1.69390778e-03, 3.89761780e-03, 2.70106923e-03, 3.47636663e-03, 7.81381316e-03, 5.97033510e-03, 4.13780799e-03, 1.22392802e-02, 1.89407300e-02, 1.19577805e-02, 1.41895693e-02, 2.28384770e-02, 2.12822221e-02, 3.55718732e-02, 5.75663000e-02, 2.53154356e-02, 2.29751552e-03, 5.56860259e-03, 1.00036217e-02, 3.75250424e-03, 
2.92790355e-03, 1.06967501e-02, 9.11982451e-03, 3.47171025e-03, 4.26977174e-03, 4.31640586e-03, 3.44648538e-03, 1.17870830e-02, 2.32290216e-02, 1.31409196e-02, 3.32579296e-03, 1.53392460e-02, 2.09463164e-02, 1.43476883e-02, 1.07198600e-02, 5.24763530e-03, 1.19844836e-03, 6.89007982e-04, 4.96164430e-04, 1.53354369e-04, 7.83722571e-05, 1.21107812e-04, 6.51257360e-05, 1.06845939e-04, 2.90082477e-04, 2.09049831e-04, 3.42945241e-05, 1.75379610e-04, 7.58524227e-04, 2.32403356e-04, 4.19872697e-04, 5.55520924e-03, 1.14839673e-02, 5.17792348e-03, 7.77232368e-03, 3.36388536e-02, 2.70598419e-02, 1.51852425e-02, 2.62977779e-02, 1.52134784e-02, 3.64050455e-03, ]) # fmt: on self.assertTrue(np.allclose(spec[64:128, 321], expected)) def test_spectrogram_batch_power(self): waveform_list = self._load_datasamples(3) spec_list = spectrogram_batch( waveform_list, window_function(400, "hann", frame_length=512), frame_length=512, hop_length=128, power=None, ) self.assertEqual(spec_list[0].shape, (257, 732)) self.assertEqual(spec_list[0].dtype, np.complex64) self.assertEqual(spec_list[1].shape, (257, 602)) self.assertEqual(spec_list[1].dtype, np.complex64) self.assertEqual(spec_list[2].shape, (257, 1561)) self.assertEqual(spec_list[2].dtype, np.complex64) # fmt: off expected1 = np.array([ 0.01452305+0.01820039j, -0.01737362-0.01641946j, 0.0121028 +0.01565081j, -0.02794554-0.03021514j, 0.04719803+0.04086519j, -0.04391563-0.02779365j, 0.05682834+0.01571325j, -0.08604821-0.02023657j, 0.07497991+0.0186641j , -0.06366091-0.00922475j, 0.11003416+0.0114788j , -0.13677941-0.01523552j, 0.10934535-0.00117226j, -0.11635598+0.02551187j, 0.14708674-0.03469823j, -0.1328196 +0.06034218j, 0.12667368-0.13973421j, -0.14764774+0.18912019j, 0.10235471-0.12181523j, -0.00773012+0.04730498j, -0.01487191-0.07312611j, -0.02739162+0.09619419j, 0.02895459-0.05398273j, 0.01198589+0.05276592j, -0.02117299-0.10123465j, 0.00666388+0.09526499j, -0.01672773-0.05649684j, 0.02723125+0.05939891j, -0.01879361-0.062954j , 
0.03686557+0.04568823j, -0.07394181-0.07949649j, 0.06238583+0.13905765j, ]) expected2 = np.array([ -0.01634146-7.0067253e-03j, -0.00068403+9.2661660e-03j, 0.00571721-3.9035487e-03j, -0.00915086+1.5033451e-03j, 0.01138636+5.4256055e-03j, -0.00294282-1.2016168e-02j, -0.00428711+7.3687937e-03j, -0.001002 -1.3972387e-03j, 0.00622582+3.7551194e-03j, -0.00137886-7.0342086e-03j, -0.00824075+3.8430823e-03j, 0.0107349 +7.1450039e-03j, 0.00363763-1.4242286e-02j, -0.01499857+1.7917662e-05j, -0.0046242 +1.2500680e-02j, 0.02180984+7.2047939e-03j, -0.00273568-1.6844695e-02j, -0.00178986-7.5209686e-03j, -0.01661806+1.2662713e-03j, -0.01045276+2.0611197e-02j, 0.03252975+2.5592113e-02j, 0.03945662-6.7136563e-02j, -0.10622615+4.9393820e-03j, 0.06684612+6.4607985e-02j, -0.00753762-5.1637031e-02j, -0.00220644+1.8002450e-02j, -0.00357443-4.1291970e-03j, 0.01463647-1.4063751e-03j, -0.02252573-1.1189026e-02j, 0.00276293+1.9019062e-02j, 0.01216721+1.2095908e-03j, 0.00034753-7.4386634e-03j ]) expected3 = np.array([ 2.3276670e-02+0.0406534j, -2.4413882e-02-0.07868771j, 1.0993068e-02+0.05550544j, -1.5825305e-02+0.00480187j, 4.7617555e-02-0.04421869j, -7.1669750e-02+0.06317082j, 5.9706111e-02-0.08369736j, -2.2317577e-02+0.08915959j, -2.3291381e-02-0.06601578j, 5.9362967e-02+0.03185856j, -6.5269925e-02+0.0030586j, 5.0898481e-02-0.04319243j, -4.0413942e-02+0.08051146j, 3.0059000e-02-0.09730332j, -1.2479190e-02+0.09703682j, -6.1806822e-03-0.09617531j, 2.6907364e-02+0.08084074j, -4.1639723e-02-0.03391053j, 3.1113219e-02-0.01497662j, 3.4023849e-03+0.03632669j, -4.9804080e-02-0.039231j, 8.9777440e-02+0.02577243j, -9.2947647e-02+0.01514865j, 6.2368069e-02-0.05954866j, -2.9966677e-02+0.06520324j, -8.2365885e-05-0.0440613j , 2.0203773e-02+0.04350767j, -8.9924788e-04-0.05406843j, -3.5951469e-02+0.03055602j, 3.3790238e-02+0.02182594j, 1.0919777e-03-0.06437822j, -1.8534327e-02+0.07866792j ]) # fmt: on self.assertTrue(np.allclose(spec_list[0][64:96, 321], expected1)) 
self.assertTrue(np.allclose(spec_list[1][64:96, 321], expected2)) self.assertTrue(np.allclose(spec_list[2][64:96, 321], expected3)) spec_list = spectrogram_batch( waveform_list, window_function(400, "hann", frame_length=512), frame_length=512, hop_length=128, power=1.0, ) self.assertEqual(spec_list[0].shape, (257, 732)) self.assertEqual(spec_list[0].dtype, np.float64) self.assertEqual(spec_list[1].shape, (257, 602)) self.assertEqual(spec_list[1].dtype, np.float64) self.assertEqual(spec_list[2].shape, (257, 1561)) self.assertEqual(spec_list[2].dtype, np.float64) # fmt: off expected1 = np.array([ 0.02328461, 0.02390484, 0.01978448, 0.04115711, 0.0624309 , 0.05197181, 0.05896072, 0.08839577, 0.07726794, 0.06432579, 0.11063128, 0.13762532, 0.10935163, 0.11911998, 0.15112405, 0.14588428, 0.18860507, 0.23992978, 0.15910825, 0.04793241, 0.07462307, 0.10001811, 0.06125769, 0.05411011, 0.10342509, 0.09549777, 0.05892122, 0.06534349, 0.06569936, 0.05870678, 0.10856833, 0.1524107 , 0.11463385, 0.05766969, 0.12385171, 0.14472842, 0.11978184, 0.10353675, 0.07244056, 0.03461861, 0.02624896, 0.02227475, 0.01238363, 0.00885281, 0.0110049 , 0.00807005, 0.01033663, 0.01703181, 0.01445856, 0.00585615, 0.0132431 , 0.02754132, 0.01524478, 0.0204908 , 0.07453328, 0.10716327, 0.07195779, 0.08816078, 0.18340898, 0.16449876, 0.12322842, 0.1621659 , 0.12334293, 0.06033659, ]) expected2 = np.array([ 0.01778026, 0.00929138, 0.00692273, 0.00927352, 0.01261294, 0.01237128, 0.00852516, 0.00171938, 0.00727061, 0.00716808, 0.00909281, 0.01289532, 0.01469949, 0.01499858, 0.01332855, 0.02296907, 0.01706539, 0.00773101, 0.01666623, 0.02311021, 0.0413901, 0.07787261, 0.10634092, 0.09296556, 0.05218428, 0.01813716, 0.00546139, 0.01470388, 0.02515159, 0.0192187, 0.01222719, 0.00744678, 0.01045674, 0.01923522, 0.01990819, 0.01174323, 0.01535391, 0.02786647, 0.02904595, 0.0313408 , 0.0340503, 0.03118268, 0.02915136, 0.04200513, 0.05563153, 0.05429446, 0.05021769, 0.05882667, 0.06668596, 0.06555867, 
0.04523559, 0.01489498, 0.01031892, 0.02134155, 0.01736669, 0.0195216, 0.03971575, 0.03938636, 0.02052712, 0.03104931, 0.0902727, 0.09022622, 0.03275532, 0.0172633, ]) expected3 = np.array([ 0.04684551, 0.08238806, 0.05658358, 0.01653778, 0.06498249, 0.09553589, 0.10281084, 0.09191031, 0.07000408, 0.06737158, 0.06534155, 0.06675509, 0.09008541, 0.10184046, 0.09783596, 0.0963737, 0.08520112, 0.05370093, 0.03453015, 0.03648568, 0.06339967, 0.09340346, 0.09417402, 0.08623119, 0.07175977, 0.04406138, 0.04796988, 0.05407591, 0.0471824 , 0.04022626, 0.06438748, 0.0808218, 0.0745263, 0.06191467, 0.03116328, 0.03206497, 0.05867718, 0.04424652, 0.04448404, 0.07032498, 0.08300796, 0.07895744, 0.0816894, 0.09392357, 0.07571699, 0.03967651, 0.07703795, 0.06464871, 0.08704693, 0.14085226, 0.1350321, 0.18794712, 0.27043005, 0.26596246, 0.19948336, 0.06545141, 0.13204652, 0.08554521, 0.2262849, 0.33900721, 0.3970475, 0.3482436, 0.17134947, 0.46249565, ]) # fmt: on self.assertTrue(np.allclose(spec_list[0][64:128, 321], expected1)) self.assertTrue(np.allclose(spec_list[1][64:128, 321], expected2)) self.assertTrue(np.allclose(spec_list[2][64:128, 321], expected3)) spec_list = spectrogram_batch( waveform_list, window_function(400, "hann", frame_length=512), frame_length=512, hop_length=128, power=2.0, ) self.assertEqual(spec_list[0].shape, (257, 732)) self.assertEqual(spec_list[0].dtype, np.float64) self.assertEqual(spec_list[1].shape, (257, 602)) self.assertEqual(spec_list[1].dtype, np.float64) self.assertEqual(spec_list[2].shape, (257, 1561)) self.assertEqual(spec_list[2].dtype, np.float64) # fmt: off expected1 = np.array([ 5.42173162e-04, 5.71441371e-04, 3.91425507e-04, 1.69390778e-03, 3.89761780e-03, 2.70106923e-03, 3.47636663e-03, 7.81381316e-03, 5.97033510e-03, 4.13780799e-03, 1.22392802e-02, 1.89407300e-02, 1.19577805e-02, 1.41895693e-02, 2.28384770e-02, 2.12822221e-02, 3.55718732e-02, 5.75663000e-02, 2.53154356e-02, 2.29751552e-03, 5.56860259e-03, 1.00036217e-02, 
3.75250424e-03, 2.92790355e-03, 1.06967501e-02, 9.11982451e-03, 3.47171025e-03, 4.26977174e-03, 4.31640586e-03, 3.44648538e-03, 1.17870830e-02, 2.32290216e-02, 1.31409196e-02, 3.32579296e-03, 1.53392460e-02, 2.09463164e-02, 1.43476883e-02, 1.07198600e-02, 5.24763530e-03, 1.19844836e-03, 6.89007982e-04, 4.96164430e-04, 1.53354369e-04, 7.83722571e-05, 1.21107812e-04, 6.51257360e-05, 1.06845939e-04, 2.90082477e-04, 2.09049831e-04, 3.42945241e-05, 1.75379610e-04, 7.58524227e-04, 2.32403356e-04, 4.19872697e-04, 5.55520924e-03, 1.14839673e-02, 5.17792348e-03, 7.77232368e-03, 3.36388536e-02, 2.70598419e-02, 1.51852425e-02, 2.62977779e-02, 1.52134784e-02, 3.64050455e-03, ]) expected2 = np.array([ 3.16137604e-04, 8.63297362e-05, 4.79241720e-05, 8.59982493e-05, 1.59086326e-04, 1.53048476e-04, 7.26783945e-05, 2.95627100e-06, 5.28617352e-05, 5.13813355e-05, 8.26792588e-05, 1.66289156e-04, 2.16075069e-04, 2.24957314e-04, 1.77650211e-04, 5.27578282e-04, 2.91227688e-04, 5.97685493e-05, 2.77763360e-04, 5.34081651e-04, 1.71314057e-03, 6.06414277e-03, 1.13083916e-02, 8.64259617e-03, 2.72319867e-03, 3.28956593e-04, 2.98268126e-05, 2.16204145e-04, 6.32602626e-04, 3.69358508e-04, 1.49504171e-04, 5.54544917e-05, 1.09343371e-04, 3.69993847e-04, 3.96335839e-04, 1.37903521e-04, 2.35742483e-04, 7.76540114e-04, 8.43667068e-04, 9.82245923e-04, 1.15942286e-03, 9.72359636e-04, 8.49801853e-04, 1.76443092e-03, 3.09486753e-03, 2.94788822e-03, 2.52181630e-03, 3.46057723e-03, 4.44701769e-03, 4.29793858e-03, 2.04625858e-03, 2.21860290e-04, 1.06480179e-04, 4.55461892e-04, 3.01601836e-04, 3.81092892e-04, 1.57734053e-03, 1.55128531e-03, 4.21362677e-04, 9.64059883e-04, 8.14916019e-03, 8.14077014e-03, 1.07291131e-03, 2.98021545e-04, ]) expected3 = np.array([ 0.0021945 , 0.00678779, 0.0032017 , 0.0002735 , 0.00422272, 0.00912711, 0.01057007, 0.00844751, 0.00490057, 0.00453893, 0.00426952, 0.00445624, 0.00811538, 0.01037148, 0.00957188, 0.00928789, 0.00725923, 0.00288379, 0.00119233, 0.0013312 , 0.00401952, 
0.00872421, 0.00886875, 0.00743582, 0.00514946, 0.00194141, 0.00230111, 0.0029242 , 0.00222618, 0.00161815, 0.00414575, 0.00653216, 0.00555417, 0.00383343, 0.00097115, 0.00102816, 0.00344301, 0.00195775, 0.00197883, 0.0049456 , 0.00689032, 0.00623428, 0.00667316, 0.00882164, 0.00573306, 0.00157423, 0.00593485, 0.00417946, 0.00757717, 0.01983936, 0.01823367, 0.03532412, 0.07313241, 0.07073603, 0.03979361, 0.00428389, 0.01743628, 0.00731798, 0.05120486, 0.11492589, 0.15764671, 0.1212736 , 0.02936064, 0.21390222 ]) # fmt: on self.assertTrue(np.allclose(spec_list[0][64:128, 321], expected1)) self.assertTrue(np.allclose(spec_list[1][64:128, 321], expected2)) self.assertTrue(np.allclose(spec_list[2][64:128, 321], expected3)) def test_power_to_db(self): spectrogram = np.zeros((2, 3)) spectrogram[0, 0] = 2.0 spectrogram[0, 1] = 0.5 spectrogram[0, 2] = 0.707 spectrogram[1, 1] = 1.0 output = power_to_db(spectrogram, reference=1.0) expected = np.array([[3.01029996, -3.01029996, -1.50580586], [-100.0, 0.0, -100.0]]) self.assertTrue(np.allclose(output, expected)) output = power_to_db(spectrogram, reference=2.0) expected = np.array([[0.0, -6.02059991, -4.51610582], [-103.01029996, -3.01029996, -103.01029996]]) self.assertTrue(np.allclose(output, expected)) output = power_to_db(spectrogram, min_value=1e-6) expected = np.array([[3.01029996, -3.01029996, -1.50580586], [-60.0, 0.0, -60.0]]) self.assertTrue(np.allclose(output, expected)) output = power_to_db(spectrogram, db_range=80) expected = np.array([[3.01029996, -3.01029996, -1.50580586], [-76.98970004, 0.0, -76.98970004]]) self.assertTrue(np.allclose(output, expected)) output = power_to_db(spectrogram, reference=2.0, db_range=80) expected = np.array([[0.0, -6.02059991, -4.51610582], [-80.0, -3.01029996, -80.0]]) self.assertTrue(np.allclose(output, expected)) output = power_to_db(spectrogram, reference=2.0, min_value=1e-6, db_range=80) expected = np.array([[0.0, -6.02059991, -4.51610582], [-63.01029996, -3.01029996, 
-63.01029996]])
        self.assertTrue(np.allclose(output, expected))

        # Invalid arguments must be rejected: non-positive reference/min_value, negative db_range.
        with pytest.raises(ValueError):
            power_to_db(spectrogram, reference=0.0)
        with pytest.raises(ValueError):
            power_to_db(spectrogram, min_value=0.0)
        with pytest.raises(ValueError):
            power_to_db(spectrogram, db_range=-80)

    def test_power_to_db_batch(self):
        """Checks `power_to_db_batch` against values obtained by applying `power_to_db` per item."""
        # Setup a batch of spectrograms with varying values and lengths
        batch_spectrogram = np.zeros((3, 2, 3))
        batch_spectrogram[0, 0, 0] = 2.0
        batch_spectrogram[0, 0, 1] = 0.5
        batch_spectrogram[0, 0, 2] = 0.707
        batch_spectrogram[0, 1, 1] = 1.0
        batch_spectrogram[1, :, :2] = batch_spectrogram[0, :, :2] * 1.5
        batch_spectrogram[2, :, :1] = batch_spectrogram[0, :, :1] * 0.5

        # Expected values computed by applying `power_to_db` iteratively
        output = power_to_db_batch(batch_spectrogram, reference=1.0)
        expected = np.array(
            [
                [[3.01029996, -3.01029996, -1.50580586], [-100, 0, -100]],
                [[4.77121255, -1.24938737, -100], [-100, 1.76091259, -100]],
                [[0, -100, -100], [-100, -100, -100]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))

        output = power_to_db_batch(batch_spectrogram, reference=2.0)
        expected = np.array(
            [
                [[0, -6.02059991, -4.51610582], [-103.01029996, -3.01029996, -103.01029996]],
                [[1.76091259, -4.25968732, -103.01029996], [-103.01029996, -1.24938737, -103.01029996]],
                [[-3.01029996, -103.01029996, -103.01029996], [-103.01029996, -103.01029996, -103.01029996]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))

        output = power_to_db_batch(batch_spectrogram, min_value=1e-6)
        expected = np.array(
            [
                [[3.01029996, -3.01029996, -1.50580586], [-60, 0, -60]],
                [[4.77121255, -1.24938737, -60], [-60, 1.76091259, -60]],
                [[0, -60, -60], [-60, -60, -60]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))

        output = power_to_db_batch(batch_spectrogram, db_range=80)
        expected = np.array(
            [
                [[3.01029996, -3.01029996, -1.50580586], [-76.98970004, 0, -76.98970004]],
                [[4.77121255, -1.24938737, -75.22878745], [-75.22878745, 1.76091259, -75.22878745]],
                [[0, -80, -80], [-80, -80, -80]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))

        output = power_to_db_batch(batch_spectrogram, reference=2.0, db_range=80)
        expected = np.array(
            [
                [[0, -6.02059991, -4.51610582], [-80, -3.01029996, -80]],
                [[1.76091259, -4.25968732, -78.23908741], [-78.23908741, -1.24938737, -78.23908741]],
                [[-3.01029996, -83.01029996, -83.01029996], [-83.01029996, -83.01029996, -83.01029996]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))

        output = power_to_db_batch(batch_spectrogram, reference=2.0, min_value=1e-6, db_range=80)
        expected = np.array(
            [
                [[0, -6.02059991, -4.51610582], [-63.01029996, -3.01029996, -63.01029996]],
                [[1.76091259, -4.25968732, -63.01029996], [-63.01029996, -1.24938737, -63.01029996]],
                [[-3.01029996, -63.01029996, -63.01029996], [-63.01029996, -63.01029996, -63.01029996]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))

        # Invalid arguments must be rejected, same as in the unbatched variant.
        with pytest.raises(ValueError):
            power_to_db_batch(batch_spectrogram, reference=0.0)
        with pytest.raises(ValueError):
            power_to_db_batch(batch_spectrogram, min_value=0.0)
        with pytest.raises(ValueError):
            power_to_db_batch(batch_spectrogram, db_range=-80)

    def test_amplitude_to_db(self):
        """Checks `amplitude_to_db` against hand-computed dB values (20*log10 scaling)."""
        spectrogram = np.zeros((2, 3))
        spectrogram[0, 0] = 2.0
        spectrogram[0, 1] = 0.5
        spectrogram[0, 2] = 0.707
        spectrogram[1, 1] = 1.0

        output = amplitude_to_db(spectrogram, reference=1.0)
        expected = np.array([[6.02059991, -6.02059991, -3.01161172], [-100.0, 0.0, -100.0]])
        self.assertTrue(np.allclose(output, expected))

        output = amplitude_to_db(spectrogram, reference=2.0)
        expected = np.array([[0.0, -12.04119983, -9.03221164], [-106.02059991, -6.02059991, -106.02059991]])
        self.assertTrue(np.allclose(output, expected))

        output = amplitude_to_db(spectrogram, min_value=1e-3)
        expected = np.array([[6.02059991, -6.02059991, -3.01161172], [-60.0, 0.0, -60.0]])
        self.assertTrue(np.allclose(output, expected))

        output = amplitude_to_db(spectrogram, db_range=80)
        expected = np.array([[6.02059991, -6.02059991, -3.01161172], [-73.97940009, 0.0, -73.97940009]])
        self.assertTrue(np.allclose(output, expected))

        output = amplitude_to_db(spectrogram, reference=2.0, db_range=80)
        expected = np.array([[0.0, -12.04119983, -9.03221164], [-80.0, -6.02059991, -80.0]])
        self.assertTrue(np.allclose(output, expected))

        output = amplitude_to_db(spectrogram, reference=2.0, min_value=1e-3, db_range=80)
        expected = np.array([[0.0, -12.04119983, -9.03221164], [-66.02059991, -6.02059991, -66.02059991]])
        self.assertTrue(np.allclose(output, expected))

        # Invalid arguments must be rejected with a ValueError.
        with pytest.raises(ValueError):
            amplitude_to_db(spectrogram, reference=0.0)
        with pytest.raises(ValueError):
            amplitude_to_db(spectrogram, min_value=0.0)
        with pytest.raises(ValueError):
            amplitude_to_db(spectrogram, db_range=-80)

    def test_amplitude_to_db_batch(self):
        """Checks `amplitude_to_db_batch` against values obtained by applying `amplitude_to_db` per item."""
        # Setup a batch of spectrograms with varying values and lengths
        batch_spectrogram = np.zeros((3, 2, 3))
        batch_spectrogram[0, 0, 0] = 2.0
        batch_spectrogram[0, 0, 1] = 0.5
        batch_spectrogram[0, 0, 2] = 0.707
        batch_spectrogram[0, 1, 1] = 1.0
        batch_spectrogram[1, :, :2] = batch_spectrogram[0, :, :2] * 1.5
        batch_spectrogram[2, :, :1] = batch_spectrogram[0, :, :1] * 0.5

        # Expected values computed by applying `amplitude_to_db` iteratively
        output = amplitude_to_db_batch(batch_spectrogram, reference=1.0)
        expected = np.array(
            [
                [[6.02059991, -6.02059991, -3.01161172], [-100, 0, -100]],
                [[9.54242509, -2.49877473, -100], [-100, 3.52182518, -100]],
                [[0, -100, -100], [-100, -100, -100]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))

        output = amplitude_to_db_batch(batch_spectrogram, reference=2.0)
        expected = np.array(
            [
                [[0, -12.04119983, -9.03221164], [-106.02059991, -6.02059991, -106.02059991]],
                [[3.52182518, -8.51937465, -106.02059991], [-106.02059991, -2.49877473, -106.02059991]],
                [[-6.02059991, -106.02059991, -106.02059991], [-106.02059991, -106.02059991, -106.02059991]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))

        output = amplitude_to_db_batch(batch_spectrogram, min_value=1e-3)
        expected = np.array(
            [
                [[6.02059991, -6.02059991, -3.01161172], [-60, 0, -60]],
                [[9.54242509, -2.49877473, -60], [-60, 3.52182518, -60]],
                [[0, -60, -60], [-60, -60, -60]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))

        output = amplitude_to_db_batch(batch_spectrogram, db_range=80)
        expected = np.array(
            [
                [[6.02059991, -6.02059991, -3.01161172], [-73.97940009, 0, -73.97940009]],
                [[9.54242509, -2.49877473, -70.45757491], [-70.45757491, 3.52182518, -70.45757491]],
                [[0, -80, -80], [-80, -80, -80]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))

        output = amplitude_to_db_batch(batch_spectrogram, reference=2.0, db_range=80)
        expected = np.array(
            [
                [[0, -12.04119983, -9.03221164], [-80, -6.02059991, -80]],
                [[3.52182518, -8.51937465, -76.47817482], [-76.47817482, -2.49877473, -76.47817482]],
                [[-6.02059991, -86.02059991, -86.02059991], [-86.02059991, -86.02059991, -86.02059991]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))

        output = amplitude_to_db_batch(batch_spectrogram, reference=2.0, min_value=1e-3, db_range=80)
        expected = np.array(
            [
                [[0, -12.04119983, -9.03221164], [-66.02059991, -6.02059991, -66.02059991]],
                [[3.52182518, -8.51937465, -66.02059991], [-66.02059991, -2.49877473, -66.02059991]],
                [[-6.02059991, -66.02059991, -66.02059991], [-66.02059991, -66.02059991, -66.02059991]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))

        # Invalid arguments must be rejected, same as in the unbatched variant.
        with pytest.raises(ValueError):
            amplitude_to_db_batch(batch_spectrogram, reference=0.0)
        with pytest.raises(ValueError):
            amplitude_to_db_batch(batch_spectrogram, min_value=0.0)
        with pytest.raises(ValueError):
            amplitude_to_db_batch(batch_spectrogram, db_range=-80)

    @require_librosa
    def test_chroma_equivalence(self):
        """Checks `chroma_filter_bank` matches librosa's `chroma` for several parameter sets."""
        num_frequency_bins = 25
        num_chroma = 6
        sampling_rate = 24000

        # test default parameters
        original_chroma = chroma(sr=sampling_rate, n_chroma=num_chroma, n_fft=num_frequency_bins)
        utils_chroma = chroma_filter_bank(
            num_frequency_bins=num_frequency_bins, num_chroma=num_chroma, sampling_rate=sampling_rate
        )
        self.assertTrue(np.allclose(original_chroma, utils_chroma))

        # test no weighting_parameters
        original_chroma = chroma(sr=sampling_rate, n_chroma=num_chroma, n_fft=num_frequency_bins, octwidth=None)
        utils_chroma = chroma_filter_bank(
            num_frequency_bins=num_frequency_bins,
            num_chroma=num_chroma,
            sampling_rate=sampling_rate,
            weighting_parameters=None,
        )
        self.assertTrue(np.allclose(original_chroma, utils_chroma))

        # test with L1 norm
        original_chroma = chroma(sr=sampling_rate, n_chroma=num_chroma, n_fft=num_frequency_bins, norm=1.0)
        utils_chroma = chroma_filter_bank(
            num_frequency_bins=num_frequency_bins, num_chroma=num_chroma, sampling_rate=sampling_rate, power=1.0
        )
        self.assertTrue(np.allclose(original_chroma, utils_chroma))

        # test starting at 'A' chroma, power = None, tuning = 0, different weighting_parameters
        original_chroma = chroma(
            sr=sampling_rate,
            n_chroma=num_chroma,
            n_fft=num_frequency_bins,
            norm=None,
            base_c=None,
            octwidth=1.0,
            ctroct=4.0,
        )
        utils_chroma = chroma_filter_bank(
            num_frequency_bins=num_frequency_bins,
            num_chroma=num_chroma,
            sampling_rate=sampling_rate,
            power=None,
            start_at_c_chroma=False,
            weighting_parameters=(4.0, 1.0),
        )
        self.assertTrue(np.allclose(original_chroma, utils_chroma))
transformers/tests/utils/test_audio_utils.py/0
{ "file_path": "transformers/tests/utils/test_audio_utils.py", "repo_id": "transformers", "token_count": 46035 }
601
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import hf_hub_download
from requests.exceptions import HTTPError

from transformers.utils import (
    CONFIG_NAME,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TRANSFORMERS_CACHE,
    WEIGHTS_NAME,
    cached_file,
    has_file,
)


# Hub repos and cache locations used as fixtures by the tests below.
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
TINY_BERT_PT_ONLY = "hf-internal-testing/tiny-bert-pt-only"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"

GATED_REPO = "hf-internal-testing/dummy-gated-model"
README_FILE = "README.md"


class GetFromCacheTests(unittest.TestCase):
    """Tests for `cached_file` / `has_file` and the on-disk Hub cache layout they rely on."""

    def test_cached_file(self):
        """`cached_file` downloads into the shared cache and resolves revisions to commit hashes."""
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        """Invalid repo ids, revisions and filenames raise informative `EnvironmentError`s."""
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        """A missing entry is recorded under `.no_exist` so later lookups can skip the network."""
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        # Under the mock environment, hf_hub_download will always raise an HTTPError
        with mock.patch("transformers.utils.hub.hf_hub_download", side_effect=HTTPError) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        """`has_file` reports which framework checkpoints exist on a PT-only repo."""
        self.assertTrue(has_file(TINY_BERT_PT_ONLY, WEIGHTS_NAME))
        self.assertFalse(has_file(TINY_BERT_PT_ONLY, TF2_WEIGHTS_NAME))
        self.assertFalse(has_file(TINY_BERT_PT_ONLY, FLAX_WEIGHTS_NAME))

    def test_has_file_in_cache(self):
        """In offline mode `has_file` only reflects what is already present in the cache dir."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            # Empty cache dir + offline mode => return False
            assert not has_file(TINY_BERT_PT_ONLY, WEIGHTS_NAME, local_files_only=True, cache_dir=tmp_dir)

            # Populate cache dir
            hf_hub_download(TINY_BERT_PT_ONLY, WEIGHTS_NAME, cache_dir=tmp_dir)

            # Cache dir + offline mode => return True
            assert has_file(TINY_BERT_PT_ONLY, WEIGHTS_NAME, local_files_only=True, cache_dir=tmp_dir)

    def test_get_file_from_repo_distant(self):
        """`cached_file` on a remote repo: None for missing files, errors for bad repo/revision."""
        # should return None if the file does not exist
        self.assertIsNone(
            cached_file(
                "google-bert/bert-base-cased",
                "ahah.txt",
                _raise_exceptions_for_gated_repo=False,
                _raise_exceptions_for_missing_entries=False,
                _raise_exceptions_for_connection_errors=False,
            )
        )

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            cached_file(
                "bert-base-case",
                CONFIG_NAME,
                _raise_exceptions_for_gated_repo=False,
                _raise_exceptions_for_missing_entries=False,
                _raise_exceptions_for_connection_errors=False,
            )

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            cached_file(
                "google-bert/bert-base-cased",
                CONFIG_NAME,
                revision="ahaha",
                _raise_exceptions_for_gated_repo=False,
                _raise_exceptions_for_missing_entries=False,
                _raise_exceptions_for_connection_errors=False,
            )

        resolved_file = cached_file(
            "google-bert/bert-base-cased",
            CONFIG_NAME,
            _raise_exceptions_for_gated_repo=False,
            _raise_exceptions_for_missing_entries=False,
            _raise_exceptions_for_connection_errors=False,
        )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        # Use a context manager so the file handle is closed (the previous version leaked it).
        with open(resolved_file) as f:
            config = json.load(f)
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        """`cached_file` on a local directory resolves existing files and returns None otherwise."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(
                cached_file(
                    tmp_dir,
                    "a.txt",
                    _raise_exceptions_for_gated_repo=False,
                    _raise_exceptions_for_missing_entries=False,
                    _raise_exceptions_for_connection_errors=False,
                ),
                str(filename),
            )

            self.assertIsNone(
                cached_file(
                    tmp_dir,
                    "b.txt",
                    _raise_exceptions_for_gated_repo=False,
                    _raise_exceptions_for_missing_entries=False,
                    _raise_exceptions_for_connection_errors=False,
                )
            )

    def test_get_file_gated_repo(self):
        """Test download file from a gated repo fails with correct message when not authenticated."""
        with self.assertRaisesRegex(EnvironmentError, "You are trying to access a gated repo."):
            # All files except README.md are protected on a gated repo.
            cached_file(GATED_REPO, "gated_file.txt", token=False)

    def test_has_file_gated_repo(self):
        """Test check file existence from a gated repo fails with correct message when not authenticated."""
        with self.assertRaisesRegex(EnvironmentError, "is a gated repository"):
            # All files except README.md are protected on a gated repo.
            has_file(GATED_REPO, "gated_file.txt", token=False)

    def test_cached_files_exception_raised(self):
        """Test that unhandled exceptions, e.g. ModuleNotFoundError, are properly re-raised by cached_files when hf_hub_download fails."""
        with mock.patch(
            "transformers.utils.hub.hf_hub_download", side_effect=ModuleNotFoundError("No module named 'MockModule'")
        ):
            with self.assertRaises(ModuleNotFoundError):
                # The error should be re-raised by cached_files, not caught in the exception handling block
                cached_file(RANDOM_BERT, "nonexistent.json")
transformers/tests/utils/test_hub_utils.py/0
{ "file_path": "transformers/tests/utils/test_hub_utils.py", "repo_id": "transformers", "token_count": 3904 }
602
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import is_torch_available, is_vision_available
from transformers.image_processing_utils import get_size_dict
from transformers.image_utils import SizeDict
from transformers.processing_utils import VideosKwargs
from transformers.testing_utils import (
    require_av,
    require_cv2,
    require_decord,
    require_torch,
    require_torchcodec,
    require_torchvision,
    require_vision,
)
from transformers.video_utils import group_videos_by_shape, make_batched_videos, reorder_videos


if is_torch_available():
    import torch

if is_vision_available():
    import PIL

    from transformers import BaseVideoProcessor
    from transformers.video_utils import VideoMetadata, load_video


def get_random_video(height, width, num_frames=8, return_torch=False):
    """Builds a deterministic-shape random video: `num_frames` copies of one random frame.

    Returns a (frames, height, width, channels) uint8 array, or a channels-first
    (frames, channels, height, width) tensor when `return_torch` is set.
    """
    random_frame = np.random.randint(0, 256, (height, width, 3), dtype=np.uint8)
    video = np.array([random_frame] * num_frames)
    if return_torch:
        # move channel first
        return torch.from_numpy(video).permute(0, 3, 1, 2)
    return video


@require_vision
@require_torchvision
class BaseVideoProcessorTester(unittest.TestCase):
    """
    Tests that the `transforms` can be applied to a 4-dim array directly, i.e. to a whole video.
    """

    def test_make_batched_videos_pil(self):
        """PIL inputs (single image, list of frames, nested list) are batched into videos."""
        # Test a single image is converted to a list of 1 video with 1 frame
        video = get_random_video(16, 32)
        pil_image = PIL.Image.fromarray(video[0])
        videos_list = make_batched_videos(pil_image)
        self.assertIsInstance(videos_list, list)
        self.assertIsInstance(videos_list[0], np.ndarray)
        self.assertEqual(videos_list[0].shape, (1, 16, 32, 3))
        self.assertTrue(np.array_equal(videos_list[0][0], np.array(pil_image)))

        # Test a list of videos is converted to a list of 1 video
        video = get_random_video(16, 32)
        pil_video = [PIL.Image.fromarray(frame) for frame in video]
        videos_list = make_batched_videos(pil_video)
        self.assertIsInstance(videos_list, list)
        self.assertIsInstance(videos_list[0], np.ndarray)
        self.assertEqual(videos_list[0].shape, (8, 16, 32, 3))
        self.assertTrue(np.array_equal(videos_list[0], video))

        # Test a nested list of videos is not modified
        video = get_random_video(16, 32)
        pil_video = [PIL.Image.fromarray(frame) for frame in video]
        videos = [pil_video, pil_video]
        videos_list = make_batched_videos(videos)
        self.assertIsInstance(videos_list, list)
        self.assertIsInstance(videos_list[0], np.ndarray)
        self.assertEqual(videos_list[0].shape, (8, 16, 32, 3))
        self.assertTrue(np.array_equal(videos_list[0], video))

    def test_make_batched_videos_numpy(self):
        """NumPy inputs (3d frame, 4d video, list of 4d videos) are batched into videos."""
        # Test a single image is converted to a list of 1 video with 1 frame
        video = get_random_video(16, 32)[0]
        videos_list = make_batched_videos(video)
        self.assertIsInstance(videos_list, list)
        self.assertIsInstance(videos_list[0], np.ndarray)
        self.assertEqual(videos_list[0].shape, (1, 16, 32, 3))
        self.assertTrue(np.array_equal(videos_list[0][0], video))

        # Test a 4d array of videos is converted to a list of 1 video
        video = get_random_video(16, 32)
        videos_list = make_batched_videos(video)
        self.assertIsInstance(videos_list, list)
        self.assertIsInstance(videos_list[0], np.ndarray)
        self.assertEqual(videos_list[0].shape, (8, 16, 32, 3))
        self.assertTrue(np.array_equal(videos_list[0], video))

        # Test a list of videos is converted to a list of videos
        video = get_random_video(16, 32)
        videos = [video, video]
        videos_list = make_batched_videos(videos)
        self.assertIsInstance(videos_list, list)
        self.assertIsInstance(videos_list[0], np.ndarray)
        self.assertEqual(videos_list[0].shape, (8, 16, 32, 3))
        self.assertTrue(np.array_equal(videos_list[0], video))

    @require_torch
    def test_make_batched_videos_torch(self):
        """Torch inputs (3d frame, 4d video, list of 4d videos) are batched into videos."""
        # Test a single image is converted to a list of 1 video with 1 frame
        video = get_random_video(16, 32)[0]
        torch_video = torch.from_numpy(video)
        videos_list = make_batched_videos(torch_video)
        self.assertIsInstance(videos_list, list)
        self.assertIsInstance(videos_list[0], np.ndarray)
        self.assertEqual(videos_list[0].shape, (1, 16, 32, 3))
        self.assertTrue(np.array_equal(videos_list[0][0], video))

        # Test a 4d array of videos is converted to a list of 1 video
        video = get_random_video(16, 32)
        torch_video = torch.from_numpy(video)
        videos_list = make_batched_videos(torch_video)
        self.assertIsInstance(videos_list, list)
        self.assertIsInstance(videos_list[0], torch.Tensor)
        self.assertEqual(videos_list[0].shape, (8, 16, 32, 3))
        self.assertTrue(np.array_equal(videos_list[0], video))

        # Test a list of videos is converted to a list of videos
        video = get_random_video(16, 32)
        torch_video = torch.from_numpy(video)
        videos = [torch_video, torch_video]
        videos_list = make_batched_videos(videos)
        self.assertIsInstance(videos_list, list)
        self.assertIsInstance(videos_list[0], torch.Tensor)
        self.assertEqual(videos_list[0].shape, (8, 16, 32, 3))
        self.assertTrue(np.array_equal(videos_list[0], video))

    def test_resize(self):
        """`resize` applies to a whole channels-first video at once."""
        video_processor = BaseVideoProcessor(model_init_kwargs=VideosKwargs)
        video = get_random_video(16, 32, return_torch=True)

        # Size can be an int or a tuple of ints.
        size_dict = SizeDict(**get_size_dict((8, 8), param_name="size"))
        resized_video = video_processor.resize(video, size=size_dict)
        self.assertIsInstance(resized_video, torch.Tensor)
        self.assertEqual(resized_video.shape, (8, 3, 8, 8))

    def test_normalize(self):
        """`normalize` matches the explicit (x - mean) / std computation."""
        video_processor = BaseVideoProcessor(model_init_kwargs=VideosKwargs)
        array = torch.randn(4, 3, 16, 32)
        mean = [0.1, 0.5, 0.9]
        std = [0.2, 0.4, 0.6]

        # mean and std can be passed as lists or NumPy arrays.
        expected = (array - torch.tensor(mean)[:, None, None]) / torch.tensor(std)[:, None, None]
        normalized_array = video_processor.normalize(array, mean, std)
        torch.testing.assert_close(normalized_array, expected)

    def test_center_crop(self):
        """`center_crop` handles crops bigger/smaller than the video on either dimension."""
        video_processor = BaseVideoProcessor(model_init_kwargs=VideosKwargs)
        video = get_random_video(16, 32, return_torch=True)

        # Test various crop sizes: bigger on all dimensions, on one of the dimensions only and on both dimensions.
        crop_sizes = [8, (8, 64), 20, (32, 64)]
        for size in crop_sizes:
            size_dict = SizeDict(**get_size_dict(size, default_to_square=True, param_name="crop_size"))
            cropped_video = video_processor.center_crop(video, size_dict)
            self.assertIsInstance(cropped_video, torch.Tensor)

            expected_size = (size, size) if isinstance(size, int) else size
            self.assertEqual(cropped_video.shape, (8, 3, *expected_size))

    def test_convert_to_rgb(self):
        """`convert_to_rgb` expands 1-channel and reduces 4-channel videos to 3 channels."""
        video_processor = BaseVideoProcessor(model_init_kwargs=VideosKwargs)
        video = get_random_video(20, 20, return_torch=True)

        rgb_video = video_processor.convert_to_rgb(video[:, :1])
        self.assertEqual(rgb_video.shape, (8, 3, 20, 20))

        rgb_video = video_processor.convert_to_rgb(torch.cat([video, video[:, :1]], dim=1))
        self.assertEqual(rgb_video.shape, (8, 3, 20, 20))

    def test_group_and_reorder_videos(self):
        """Tests that videos can be grouped by frame size and number of frames"""
        video_1 = get_random_video(20, 20, num_frames=3, return_torch=True)
        video_2 = get_random_video(20, 20, num_frames=5, return_torch=True)

        # Group two videos of same size but different number of frames
        grouped_videos, grouped_videos_index = group_videos_by_shape([video_1, video_2])
        self.assertEqual(len(grouped_videos), 2)
        regrouped_videos = reorder_videos(grouped_videos, grouped_videos_index)
        # NOTE(review): these length checks previously used `assertTrue(len(x), n)`, which
        # ignores the second argument (it is the failure message) and always passed.
        self.assertEqual(len(regrouped_videos), 2)
        self.assertEqual(video_1.shape, regrouped_videos[0].shape)

        # Group two videos of different size but same number of frames
        video_3 = get_random_video(15, 20, num_frames=3, return_torch=True)
        grouped_videos, grouped_videos_index = group_videos_by_shape([video_1, video_3])
        self.assertEqual(len(grouped_videos), 2)
        regrouped_videos = reorder_videos(grouped_videos, grouped_videos_index)
        self.assertEqual(len(regrouped_videos), 2)
        self.assertEqual(video_1.shape, regrouped_videos[0].shape)

        # Group all three videos where some have same size or same frame count
        # But since none have frames and sizes identical, we'll have 3 groups
        grouped_videos, grouped_videos_index = group_videos_by_shape([video_1, video_2, video_3])
        self.assertEqual(len(grouped_videos), 3)
        regrouped_videos = reorder_videos(grouped_videos, grouped_videos_index)
        self.assertEqual(len(regrouped_videos), 3)
        self.assertEqual(video_1.shape, regrouped_videos[0].shape)

        # Group if we had some videos with identical shapes
        grouped_videos, grouped_videos_index = group_videos_by_shape([video_1, video_1, video_3])
        self.assertEqual(len(grouped_videos), 2)
        regrouped_videos = reorder_videos(grouped_videos, grouped_videos_index)
        self.assertEqual(len(regrouped_videos), 3)
        self.assertEqual(video_1.shape, regrouped_videos[0].shape)

        # Group if we had all videos with identical shapes
        grouped_videos, grouped_videos_index = group_videos_by_shape([video_1, video_1, video_1])
        self.assertEqual(len(grouped_videos), 1)
        regrouped_videos = reorder_videos(grouped_videos, grouped_videos_index)
        self.assertEqual(len(regrouped_videos), 3)
        self.assertEqual(video_1.shape, regrouped_videos[0].shape)


@require_vision
@require_av
class LoadVideoTester(unittest.TestCase):
    """Tests for `load_video` across sources (url/local) and decoding backends."""

    def test_load_video_url(self):
        video, _ = load_video(
            "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4",
        )
        self.assertEqual(video.shape, (243, 360, 640, 3))  # 243 frames is the whole video, no sampling applied

    def test_load_video_local(self):
        video_file_path = hf_hub_download(
            repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset"
        )
        video, _ = load_video(video_file_path)
        self.assertEqual(video.shape, (243, 360, 640, 3))  # 243 frames is the whole video, no sampling applied

    # FIXME: @raushan, yt-dlp downloading works, but for some reason it cannot redirect to the output buffer?
    # @requires_yt_dlp
    # def test_load_video_youtube(self):
    #     video = load_video("https://www.youtube.com/watch?v=QC8iQqtG0hg")
    #     self.assertEqual(video.shape, (243, 360, 640, 3))  # 243 frames is the whole video, no sampling applied

    @require_decord
    @require_torchvision
    @require_torchcodec
    @require_cv2
    def test_load_video_backend_url(self):
        video, _ = load_video(
            "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4",
            backend="decord",
        )
        self.assertEqual(video.shape, (243, 360, 640, 3))

        video, _ = load_video(
            "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4",
            backend="torchcodec",
        )
        self.assertEqual(video.shape, (243, 360, 640, 3))

        # Can't use certain backends with url
        with self.assertRaises(ValueError):
            video, _ = load_video(
                "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4",
                backend="opencv",
            )
        with self.assertRaises(ValueError):
            video, _ = load_video(
                "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4",
                backend="torchvision",
            )

    @require_decord
    @require_torchvision
    @require_torchcodec
    @require_cv2
    def test_load_video_backend_local(self):
        video_file_path = hf_hub_download(
            repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset"
        )
        video, metadata = load_video(video_file_path, backend="decord")
        self.assertEqual(video.shape, (243, 360, 640, 3))
        self.assertIsInstance(metadata, VideoMetadata)

        video, metadata = load_video(video_file_path, backend="opencv")
        self.assertEqual(video.shape, (243, 360, 640, 3))
        self.assertIsInstance(metadata, VideoMetadata)

        video, metadata = load_video(video_file_path, backend="torchvision")
        self.assertEqual(video.shape, (243, 360, 640, 3))
        self.assertIsInstance(metadata, VideoMetadata)

        video, metadata = load_video(video_file_path, backend="torchcodec")
        self.assertEqual(video.shape, (243, 360, 640, 3))
        self.assertIsInstance(metadata, VideoMetadata)

    def test_load_video_num_frames(self):
        video, _ = load_video(
            "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4",
            num_frames=16,
        )
        self.assertEqual(video.shape, (16, 360, 640, 3))

        video, _ = load_video(
            "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4",
            num_frames=22,
        )
        self.assertEqual(video.shape, (22, 360, 640, 3))

    def test_load_video_fps(self):
        video, _ = load_video(
            "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4", fps=1
        )
        self.assertEqual(video.shape, (9, 360, 640, 3))

        video, _ = load_video(
            "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4", fps=2
        )
        self.assertEqual(video.shape, (19, 360, 640, 3))

        # `num_frames` is mutually exclusive with `video_fps`
        with self.assertRaises(ValueError):
            video, _ = load_video(
                "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4",
                fps=1,
                num_frames=10,
            )
transformers/tests/utils/test_video_utils.py/0
{ "file_path": "transformers/tests/utils/test_video_utils.py", "repo_id": "transformers", "token_count": 6471 }
603
import re

from transformers.pipelines import SUPPORTED_TASKS, Pipeline


# Header written verbatim in front of the generated `@overload` stubs. Its contents
# (including the `# fmt: off` guard and the `typing` import) become part of the
# pipeline file, so they must not be edited casually.
HEADER = """
# fmt: off
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# The part of the file below was automatically generated from the code.
# Do NOT edit this part of the file manually as any edits will be overwritten by the generation
# of the file. If any change should be done, please apply the changes to the `pipeline` function
# below and run `python utils/check_pipeline_typing.py --fix_and_overwrite` to update the file.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨

from typing import Literal, overload

"""

# Footer appended after the generated stubs; re-enables auto-formatting with `# fmt: on`.
FOOTER = """
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# The part of the file above was automatically generated from the code.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# fmt: on
"""

# Exact text of the `task` parameter in the real `pipeline` signature; each generated
# overload substitutes it with a `Literal[...]` task name.
TASK_PATTERN = "task: Optional[str] = None"


def main(pipeline_file_path: str, fix_and_overwrite: bool = False):
    """Regenerate (or verify) the `@overload` typing stubs for `pipeline(...)`.

    Reads `pipeline_file_path`, rebuilds one `@overload` per task registered in
    `SUPPORTED_TASKS` (each mapping a `Literal[task]` to its concrete pipeline class),
    and either rewrites the file in place (``fix_and_overwrite=True``) or raises
    `ValueError` if the file is out of date.
    """
    with open(pipeline_file_path, "r") as file:
        content = file.read()

    # extract generated code in between <generated-code> and </generated-code>
    current_generated_code = re.search(r"# <generated-code>(.*)# </generated-code>", content, re.DOTALL).group(1)
    content_without_generated_code = content.replace(current_generated_code, "")

    # extract pipeline signature in between `def pipeline` and `-> Pipeline`
    pipeline_signature = re.search(r"def pipeline(.*) -> Pipeline:", content_without_generated_code, re.DOTALL).group(
        1
    )
    # Collapse the multi-line signature onto one line so it can be embedded in a stub.
    pipeline_signature = pipeline_signature.replace("(\n    ", "(")  # start of the signature
    pipeline_signature = pipeline_signature.replace(",\n    ", ", ")  # intermediate arguments
    pipeline_signature = pipeline_signature.replace(",\n)", ")")  # end of the signature

    # collect and sort available pipelines
    pipelines = [(f'"{task}"', task_info["impl"]) for task, task_info in SUPPORTED_TASKS.items()]
    pipelines = sorted(pipelines, key=lambda x: x[0])
    # First overload: no task given -> the generic base `Pipeline` return type.
    pipelines.insert(0, (None, Pipeline))

    # generate new `pipeline` signatures
    new_generated_code = ""
    for task, pipeline_class in pipelines:
        if TASK_PATTERN not in pipeline_signature:
            raise ValueError(f"Can't find `{TASK_PATTERN}` in pipeline signature: {pipeline_signature}")
        # `pipeline_class` may be a class object or (for the sorted entries) a string name.
        pipeline_type = pipeline_class if isinstance(pipeline_class, str) else pipeline_class.__name__
        new_pipeline_signature = pipeline_signature.replace(TASK_PATTERN, f"task: Literal[{task}]")
        new_generated_code += f"@overload\ndef pipeline{new_pipeline_signature} -> {pipeline_type}: ...\n"

    new_generated_code = HEADER + new_generated_code + FOOTER
    # Normalize trailing newlines so the comparison with the current content is stable.
    new_generated_code = new_generated_code.rstrip("\n") + "\n"

    if new_generated_code != current_generated_code and fix_and_overwrite:
        print(f"Updating {pipeline_file_path}...")
        wrapped_current_generated_code = "# <generated-code>" + current_generated_code + "# </generated-code>"
        wrapped_new_generated_code = "# <generated-code>" + new_generated_code + "# </generated-code>"
        content = content.replace(wrapped_current_generated_code, wrapped_new_generated_code)

        # write content to file
        with open(pipeline_file_path, "w") as file:
            file.write(content)

    elif new_generated_code != current_generated_code and not fix_and_overwrite:
        message = (
            f"Found inconsistencies in {pipeline_file_path}. "
            "Run `python utils/check_pipeline_typing.py --fix_and_overwrite` to fix them."
        )
        raise ValueError(message)


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    parser.add_argument(
        "--pipeline_file_path",
        type=str,
        default="src/transformers/pipelines/__init__.py",
        help="Path to the pipeline file.",
    )
    args = parser.parse_args()

    main(args.pipeline_file_path, args.fix_and_overwrite)
transformers/utils/check_pipeline_typing.py/0
{ "file_path": "transformers/utils/check_pipeline_typing.py", "repo_id": "transformers", "token_count": 2097 }
604
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8") modified_files = ( subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split() ) joined_dirs = "|".join(sys.argv[1:]) regex = re.compile(rf"^({joined_dirs}).*?\.py$") relevant_modified_files = [x for x in modified_files if regex.match(x)] print(" ".join(relevant_modified_files), end="")
transformers/utils/get_modified_files.py/0
{ "file_path": "transformers/utils/get_modified_files.py", "repo_id": "transformers", "token_count": 448 }
605
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Collect per-test pass/fail results from a CircleCI workflow: for each test job, download
# the `summary_short.txt` artifacts, parse PASSED/FAILED lines, and write per-job and
# cross-job JSON summaries under `outputs/`. Requires a `CIRCLE_TOKEN` env var for the API.

import argparse
import json
import os

import requests


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--workflow_id", type=str, required=True)
    args = parser.parse_args()
    workflow_id = args.workflow_id

    r = requests.get(
        f"https://circleci.com/api/v2/workflow/{workflow_id}/job",
        headers={"Circle-Token": os.environ.get("CIRCLE_TOKEN", "")},
    )
    jobs = r.json()["items"]

    os.makedirs("outputs", exist_ok=True)

    workflow_summary = {}
    # for each job, download artifacts
    for job in jobs:
        project_slug = job["project_slug"]
        # Only test-running jobs produce the summary artifacts we care about.
        if job["name"].startswith(("tests_", "examples_", "pipelines_")):
            url = f"https://circleci.com/api/v2/project/{project_slug}/{job['job_number']}/artifacts"
            r = requests.get(url, headers={"Circle-Token": os.environ.get("CIRCLE_TOKEN", "")})
            job_artifacts = r.json()["items"]

            os.makedirs(job["name"], exist_ok=True)
            os.makedirs(f"outputs/{job['name']}", exist_ok=True)

            # One pytest short summary per parallel node, keyed by node index.
            job_test_summaries = {}
            for artifact in job_artifacts:
                if artifact["path"].startswith("reports/") and artifact["path"].endswith("/summary_short.txt"):
                    node_index = artifact["node_index"]
                    url = artifact["url"]
                    r = requests.get(url, headers={"Circle-Token": os.environ.get("CIRCLE_TOKEN", "")})
                    test_summary = r.text
                    job_test_summaries[node_index] = test_summary

            # Merge the node summaries into one {test name: "passed"/"failed"} mapping.
            summary = {}
            for node_index, node_test_summary in job_test_summaries.items():
                for line in node_test_summary.splitlines():
                    if line.startswith("PASSED "):
                        test = line[len("PASSED ") :]
                        summary[test] = "passed"
                    elif line.startswith("FAILED "):
                        # FAILED lines may carry a trailing failure reason; keep only the test id.
                        test = line[len("FAILED ") :].split()[0]
                        summary[test] = "failed"
            # failed before passed
            summary = dict(sorted(summary.items(), key=lambda x: (x[1], x[0])))
            workflow_summary[job["name"]] = summary

            # collected version
            with open(f"outputs/{job['name']}/test_summary.json", "w") as fp:
                json.dump(summary, fp, indent=4)

    # Pivot from {job: {test: status}} to {test: {job: status}} for cross-job comparison.
    new_workflow_summary = {}
    for job_name, job_summary in workflow_summary.items():
        for test, status in job_summary.items():
            if test not in new_workflow_summary:
                new_workflow_summary[test] = {}
            new_workflow_summary[test][job_name] = status

    for test, result in new_workflow_summary.items():
        new_workflow_summary[test] = dict(sorted(result.items()))
    new_workflow_summary = dict(sorted(new_workflow_summary.items()))

    with open("outputs/test_summary.json", "w") as fp:
        json.dump(new_workflow_summary, fp, indent=4)
transformers/utils/process_circleci_workflow_test_reports.py/0
{ "file_path": "transformers/utils/process_circleci_workflow_test_reports.py", "repo_id": "transformers", "token_count": 1573 }
606
from transformers import BertTokenizer


class CustomTokenizer(BertTokenizer):
    """Subclass of `BertTokenizer` that adds no behavior of its own.

    Exists only to provide a distinct, non-library class name (test fixture for
    custom/remote-code tokenizer loading).
    """

    pass
transformers/utils/test_module/custom_tokenization.py/0
{ "file_path": "transformers/utils/test_module/custom_tokenization.py", "repo_id": "transformers", "token_count": 25 }
607
# Unsloth Integration

<Tip warning={true}>

Section under construction. Feel free to contribute!

</Tip>

Unsloth is an open‑source framework for fine‑tuning and reinforcement learning that trains LLMs (like Llama, Mistral, Gemma, DeepSeek, and more) up to 2× faster with up to 70% less VRAM, while providing a streamlined, Hugging Face–compatible workflow for training, evaluation, and deployment.
The Unsloth library is fully compatible with [`SFTTrainer`]. Some benchmarks on 1 x A100 are listed below:

| 1 A100 40GB     | Dataset   | 🤗  | 🤗 + FlashAttention 2 | 🦥 Unsloth | 🦥 VRAM saved |
| --------------- | --------- | --- | --------------------- | --------- | ------------ |
| Code Llama 34b  | Slim Orca | 1x  | 1.01x                 | **1.94x** | -22.7%       |
| Llama-2 7b      | Slim Orca | 1x  | 0.96x                 | **1.87x** | -39.3%       |
| Mistral 7b      | Slim Orca | 1x  | 1.17x                 | **1.88x** | -65.9%       |
| Tiny Llama 1.1b | Alpaca    | 1x  | 1.55x                 | **2.74x** | -57.8%       |

First, install `unsloth` according to the [official documentation](https://github.com/unslothai/unsloth). Once installed, you can incorporate unsloth into your workflow in a very simple manner; instead of loading [`~transformers.AutoModelForCausalLM`], you just need to load a `FastLanguageModel` as follows:

```python
import torch
from trl import SFTConfig, SFTTrainer
from unsloth import FastLanguageModel

max_length = 2048  # Supports automatic RoPE Scaling, so choose any number

# Load model
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="unsloth/mistral-7b",
    max_seq_length=max_length,
    dtype=None,  # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
    load_in_4bit=True,  # Use 4bit quantization to reduce memory usage. Can be False
)

# Do model patching and add fast LoRA weights
model = FastLanguageModel.get_peft_model(
    model,
    r=16,
    target_modules=[
        "q_proj",
        "k_proj",
        "v_proj",
        "o_proj",
        "gate_proj",
        "up_proj",
        "down_proj",
    ],
    lora_alpha=16,
    lora_dropout=0,  # Dropout = 0 is currently optimized
    bias="none",  # Bias = "none" is currently optimized
    use_gradient_checkpointing=True,
    random_state=3407,
)

training_args = SFTConfig(output_dir="./output", max_length=max_length)

trainer = SFTTrainer(
    model=model,
    args=training_args,
    train_dataset=dataset,
)
trainer.train()
```

The saved model is fully compatible with Hugging Face's transformers library. Learn more about unsloth in their [official repository](https://github.com/unslothai/unsloth).
trl/docs/source/unsloth_integration.md/0
{ "file_path": "trl/docs/source/unsloth_integration.md", "repo_id": "trl", "token_count": 1037 }
608
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass, field
from typing import Optional

from datasets import load_dataset
from huggingface_hub import ModelCard
from transformers import AutoTokenizer, HfArgumentParser


@dataclass
class ScriptArguments:
    r"""
    Arguments for the script.

    Args:
        push_to_hub (`bool`, *optional*, defaults to `False`):
            Whether to push the dataset to the Hugging Face Hub.
        repo_id (`str`, *optional*, defaults to `"trl-lib/lm-human-preferences-descriptiveness"`):
            Hugging Face repository ID to push the dataset to.
        dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
            Number of workers to use for dataset processing.
    """

    push_to_hub: bool = field(
        default=False,
        metadata={"help": "Whether to push the dataset to the Hugging Face Hub."},
    )
    repo_id: str = field(
        default="trl-lib/lm-human-preferences-descriptiveness",
        metadata={"help": "Hugging Face repository ID to push the dataset to."},
    )
    dataset_num_proc: Optional[int] = field(
        default=None,
        metadata={"help": "Number of workers to use for dataset processing."},
    )


# Edge cases handling: remove the cases where all samples are the same
def samples_not_all_same(example):
    """Return `True` if at least one of the four samples differs from `sample0`.

    Used as a `datasets.filter` predicate to drop degenerate examples in which all
    candidate completions are identical (no preference pair can be built from them).
    """
    return not all(example["sample0"] == example[f"sample{j}"] for j in range(1, 4))


def to_prompt_completion(example, tokenizer):
    """Convert a raw LM-Human-Preferences example into a prompt/chosen/rejected triple.

    The human-preferred sample (index `example["best"]`) becomes `"chosen"`; the first
    of the four samples whose decoded text differs from it becomes `"rejected"`.

    Args:
        example: Row with token-id fields `"query"`, `"sample0"`..`"sample3"` and the
            integer index `"best"`.
        tokenizer: Tokenizer whose `decode` maps token ids back to text.

    Returns:
        `dict` with `"prompt"`, `"chosen"` and `"rejected"` text fields.

    Raises:
        ValueError: If all four decoded samples are identical, so no rejected sample
            exists. (Examples like this are expected to be removed beforehand by
            filtering with `samples_not_all_same`.)
    """
    prompt = tokenizer.decode(example["query"]).strip()
    best_idx = example["best"]
    chosen = tokenizer.decode(example[f"sample{best_idx}"])
    for rejected_idx in range(4):  # take the first rejected sample that is different from the chosen one
        rejected = tokenizer.decode(example[f"sample{rejected_idx}"])
        if chosen != rejected:
            break
    else:
        # Explicit error instead of `assert`: asserts are stripped under `python -O`,
        # which would silently emit a pair with identical chosen/rejected texts.
        raise ValueError("All four samples decode to the same text; cannot build a chosen/rejected pair.")
    return {"prompt": prompt, "chosen": chosen, "rejected": rejected}


model_card = ModelCard("""
---
tags: [trl]
---

# LM-Human-Preferences-Descriptiveness Dataset

## Summary

The LM-Human-Preferences-Descriptiveness dataset is a processed subset of [OpenAI's LM-Human-Preferences](https://github.com/openai/lm-human-preferences), focusing specifically on enhancing the descriptiveness of generated text. It contains pairs of text samples, each labeled as either "chosen" or "rejected," based on human preferences regarding the level of detail and vividness in the descriptions. This dataset enables models to learn human preferences in descriptive language, improving their ability to generate rich and engaging narratives.

## Data Structure

- **Format**: [Standard](https://huggingface.co/docs/trl/main/dataset_formats#standard)
- **Type**: [Preference](https://huggingface.co/docs/trl/main/dataset_formats#preference)

Columns:
- `"prompt"`: The text sample.
- `"chosen"`: A version of the text with enhanced descriptiveness.
- `"rejected"`: A version of the text with less descriptiveness.

This structure allows models to learn to prefer the _chosen_ response over the _rejected_ one, thereby aligning with human preferences in descriptive language.

## Generation script

The script used to generate this dataset can be found [here](https://github.com/huggingface/trl/blob/main/examples/datasets/lm-human-preferences-descriptiveness.py).
""")

if __name__ == "__main__":
    parser = HfArgumentParser(ScriptArguments)
    script_args = parser.parse_args_into_dataclasses()[0]

    # Raw labels file published by OpenAI for the descriptiveness task.
    dataset = load_dataset(
        "json",
        data_files="https://openaipublic.blob.core.windows.net/lm-human-preferences/labels/descriptiveness/offline_5k.json",
        split="train",
    )

    # Drop examples where all four samples are identical (see `samples_not_all_same`).
    dataset = dataset.filter(samples_not_all_same, num_proc=script_args.dataset_num_proc)

    dataset = dataset.map(
        to_prompt_completion,
        num_proc=script_args.dataset_num_proc,
        remove_columns=["query", "sample0", "sample1", "sample2", "sample3", "best"],
        fn_kwargs={"tokenizer": AutoTokenizer.from_pretrained("gpt2")},
    )

    # train_size taken from https://github.com/openai/lm-human-preferences/blob/cbfd210bb8b08f6bc5c26878c10984b90f516c66/launch.py#L79)
    dataset = dataset.train_test_split(train_size=4992)

    if script_args.push_to_hub:
        dataset.push_to_hub(script_args.repo_id)
        model_card.push_to_hub(script_args.repo_id, repo_type="dataset")
{ "file_path": "trl/examples/datasets/lm-human-preferences-descriptiveness.py", "repo_id": "trl", "token_count": 1690 }
609
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from huggingface_hub import whoami model_name = "unsloth/Llama-3.2-3B" tokenizer_name = "unsloth/Llama-3.2-3B" dataset_name = "WillHeld/top_v2" output_root_dir = "./checkpoints/" hub_model_id = f"{whoami()['name']}/layerskip-{model_name.split('/')[1]}-{dataset_name.split('/')[1]}" output_dir = f"{output_root_dir}/{hub_model_id}" per_device_train_batch_size = 8 gradient_accumulation_steps = 1 learning_rate = 2e-5
trl/examples/research_projects/layer_skip/scripts/config.py/0
{ "file_path": "trl/examples/research_projects/layer_skip/scripts/config.py", "repo_id": "trl", "token_count": 348 }
610
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# /// script
# dependencies = [
#     "trl @ git+https://github.com/huggingface/trl.git",
#     "peft",
# ]
# ///

"""
Run the BCO training script with the commands below. In general, the optimal configuration for BCO will be similar to that of KTO.

# Full training:
python examples/scripts/bco.py \
    --model_name_or_path Qwen/Qwen2.5-0.5B-Instruct \
    --trust_remote_code \
    --dataset_name trl-lib/ultrafeedback-gpt-3.5-turbo-helpfulness \
    --per_device_train_batch_size 16 \
    --per_device_eval_batch_size 32 \
    --num_train_epochs 1 \
    --learning_rate 1e-6 \
    --gradient_checkpointing \
    --gradient_accumulation_steps 1 \
    --eval_steps 0.2 \
    --save_strategy no \
    --output_dir=bco-aligned-model \
    --logging_first_step \
    --max_length 2048 \
    --max_prompt_length 1536 \
    --max_completion_length 1024 \
    --no_remove_unused_columns \
    --warmup_ratio 0.1 \
    --report_to wandb

# QLoRA:
python examples/scripts/bco.py \
    --model_name_or_path=nnheui/stablelm-2-1_6b-sft-full \
    --per_device_train_batch_size 16 \
    --per_device_eval_batch_size 32 \
    --num_train_epochs 1 \
    --learning_rate 1e-6 \
    --gradient_checkpointing \
    --gradient_accumulation_steps 1 \
    --eval_steps 0.2 \
    --save_strategy no \
    --output_dir=bco-aligned-model-lora \
    --logging_first_step \
    --warmup_ratio 0.1 \
    --report_to wandb \
    --max_length 2048 \
    --max_prompt_length 1536 \
    --max_completion_length 1024 \
    --no_remove_unused_columns \
    --warmup_ratio 0.1 \
    --use_peft \
    --load_in_4bit \
    --lora_target_modules=all-linear \
    --lora_r=16 \
    --lora_alpha=16
"""

from functools import partial

import torch
import torch.nn.functional as F
from accelerate import Accelerator
from datasets import load_dataset
from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, PreTrainedModel

from trl import BCOConfig, BCOTrainer, ModelConfig, ScriptArguments, get_peft_config


def embed_prompt(input_ids: torch.LongTensor, attention_mask: torch.LongTensor, model: PreTrainedModel):
    """
    Embed a batch of tokenized prompts with the given encoder model.

    Runs the model without gradients, mean-pools the token embeddings over valid
    (non-padding) positions, L2-normalizes and layer-norms the result, and truncates
    each vector to the first 512 dimensions.

    Borrowed from https://huggingface.co/nomic-ai/nomic-embed-text-v1.5#transformers
    """

    def mean_pooling(model_output, attention_mask):
        # model_output[0]: per-token hidden states; average only over attended positions.
        token_embeddings = model_output[0]
        input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
        # clamp avoids division by zero for fully-masked rows.
        return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

    with torch.no_grad():
        model_output = model(input_ids=input_ids, attention_mask=attention_mask)

    embeddings = mean_pooling(model_output, attention_mask)

    # Truncation dimension; per the nomic model card this is a supported "Matryoshka"
    # embedding size — presumably chosen as a speed/quality trade-off (see model card).
    matryoshka_dim = 512
    # normalize embeddings
    embeddings = F.normalize(embeddings, p=2, dim=1)
    embeddings = F.layer_norm(embeddings, normalized_shape=(embeddings.shape[1],))
    embeddings = embeddings[:, :matryoshka_dim]
    return embeddings


if __name__ == "__main__":
    parser = HfArgumentParser((ScriptArguments, BCOConfig, ModelConfig))
    script_args, training_args, model_args = parser.parse_args_into_dataclasses()

    training_args.gradient_checkpointing_kwargs = {"use_reentrant": True}

    # Load a pretrained model (policy) and a frozen copy used as the reference model.
    model = AutoModelForCausalLM.from_pretrained(
        model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
    )
    ref_model = AutoModelForCausalLM.from_pretrained(
        model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
    )

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
    )
    # Some base models ship without a pad token; reuse EOS so batching works.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config)

    # Separate embedding model (+ its own tokenizer) used by BCO's underlying
    # distribution-matching component via `embedding_func`.
    accelerator = Accelerator()
    embedding_model = AutoModel.from_pretrained(
        "nomic-ai/nomic-embed-text-v1.5",
        trust_remote_code=model_args.trust_remote_code,
        safe_serialization=True,
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )
    embedding_model = accelerator.prepare_model(embedding_model)
    embedding_tokenizer = AutoTokenizer.from_pretrained(
        "bert-base-uncased", trust_remote_code=model_args.trust_remote_code
    )
    # Bind the model now; the trainer only supplies (input_ids, attention_mask).
    embedding_func = partial(
        embed_prompt,
        model=embedding_model,
    )

    # Initialize the BCO trainer
    trainer = BCOTrainer(
        model,
        ref_model,
        args=training_args,
        train_dataset=dataset[script_args.dataset_train_split],
        eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None,
        processing_class=tokenizer,
        peft_config=get_peft_config(model_args),
        embedding_func=embedding_func,
        embedding_tokenizer=embedding_tokenizer,
    )

    # Train and push the model to the Hub
    trainer.train()

    # Save and push to hub
    trainer.save_model(training_args.output_dir)
    if training_args.push_to_hub:
        trainer.push_to_hub(dataset_name=script_args.dataset_name)
{ "file_path": "trl/examples/scripts/bco.py", "repo_id": "trl", "token_count": 2270 }
611
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# /// script
# dependencies = [
#     "trl @ git+https://github.com/huggingface/trl.git",
#     "peft",
# ]
# ///

import shutil

import torch
from accelerate import PartialState
from datasets import load_dataset
from transformers import (
    AutoModelForCausalLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    HfArgumentParser,
)

from trl import (
    ModelConfig,
    PPOConfig,
    PPOTrainer,
    ScriptArguments,
    get_kbit_device_map,
    get_peft_config,
    get_quantization_config,
)
from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE


# Usage examples (single GPU, then multi-GPU with DeepSpeed ZeRO-2 via accelerate):
"""
python examples/scripts/ppo/ppo_tldr.py \
    --dataset_name trl-internal-testing/tldr-preference-sft-trl-style \
    --dataset_test_split validation \
    --learning_rate 3e-6 \
    --output_dir models/minimal/ppo_tldr \
    --per_device_train_batch_size 1 \
    --gradient_accumulation_steps 64 \
    --total_episodes 30000 \
    --model_name_or_path EleutherAI/pythia-1b-deduped \
    --sft_model_path cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr \
    --reward_model_path cleanrl/EleutherAI_pythia-1b-deduped__reward__tldr \
    --missing_eos_penalty 1.0 \
    --stop_token eos \
    --response_length 53 \
    --eval_strategy steps \
    --eval_steps 100

accelerate launch --config_file examples/accelerate_configs/deepspeed_zero2.yaml \
    examples/scripts/ppo/ppo_tldr.py \
    --dataset_name trl-internal-testing/tldr-preference-sft-trl-style \
    --dataset_test_split validation \
    --output_dir models/minimal/ppo_tldr \
    --learning_rate 3e-6 \
    --per_device_train_batch_size 16 \
    --gradient_accumulation_steps 4 \
    --total_episodes 1000000 \
    --model_name_or_path EleutherAI/pythia-1b-deduped \
    --sft_model_path cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr \
    --reward_model_path cleanrl/EleutherAI_pythia-1b-deduped__reward__tldr \
    --local_rollout_forward_batch_size 16 \
    --missing_eos_penalty 1.0 \
    --stop_token eos \
    --eval_strategy steps \
    --eval_steps 100
"""

if __name__ == "__main__":
    parser = HfArgumentParser((ScriptArguments, PPOConfig, ModelConfig))
    script_args, training_args, model_args = parser.parse_args_into_dataclasses()
    # remove output_dir if exists
    shutil.rmtree(training_args.output_dir, ignore_errors=True)

    ################
    # Model & Tokenizer
    ################
    torch_dtype = (
        model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype)
    )
    quantization_config = get_quantization_config(model_args)
    # NOTE(review): `model_kwargs` is built but never passed to any `from_pretrained`
    # call below — the dtype/quantization settings appear to be unused; confirm intent.
    model_kwargs = dict(
        revision=model_args.model_revision,
        attn_implementation=model_args.attn_implementation,
        torch_dtype=torch_dtype,
        device_map=get_kbit_device_map() if quantization_config is not None else None,
        quantization_config=quantization_config,
    )

    # Left padding: generation expects prompts to be left-padded.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.model_name_or_path, padding_side="left", trust_remote_code=model_args.trust_remote_code
    )
    tokenizer.add_special_tokens({"pad_token": "[PAD]"})
    if tokenizer.chat_template is None:
        tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE
    # Value and reward heads are both single-label sequence classifiers loaded from
    # the reward checkpoint; the policy starts from the SFT checkpoint.
    value_model = AutoModelForSequenceClassification.from_pretrained(
        training_args.reward_model_path, trust_remote_code=model_args.trust_remote_code, num_labels=1
    )
    reward_model = AutoModelForSequenceClassification.from_pretrained(
        training_args.reward_model_path, trust_remote_code=model_args.trust_remote_code, num_labels=1
    )
    policy = AutoModelForCausalLM.from_pretrained(
        training_args.sft_model_path, trust_remote_code=model_args.trust_remote_code
    )

    peft_config = get_peft_config(model_args)
    if peft_config is None:
        # Full fine-tuning: keep a frozen copy of the SFT model as the PPO reference.
        ref_policy = AutoModelForCausalLM.from_pretrained(
            training_args.sft_model_path, trust_remote_code=model_args.trust_remote_code
        )
    else:
        # With PEFT, the base (adapter-disabled) model serves as the reference.
        ref_policy = None

    ################
    # Dataset
    ################
    dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config)
    train_dataset = dataset[script_args.dataset_train_split]
    eval_dataset = dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None

    def prepare_dataset(dataset, tokenizer):
        """pre-tokenize the dataset before training; only collate during training"""

        def tokenize(element):
            # Only the first message (the prompt) is templated; the response is generated.
            input_ids = tokenizer.apply_chat_template(
                element["messages"][:1],
                padding=False,
                add_generation_prompt=True,
            )
            return {"input_ids": input_ids, "lengths": len(input_ids)}

        return dataset.map(
            tokenize,
            remove_columns=dataset.column_names,
            num_proc=training_args.dataset_num_proc,
        )

    # Compute that only on the main process for faster data processing.
    # see: https://github.com/huggingface/trl/pull/1255
    with PartialState().local_main_process_first():
        train_dataset = prepare_dataset(train_dataset, tokenizer)
        if eval_dataset is not None:
            eval_dataset = prepare_dataset(eval_dataset, tokenizer)
        # filtering
        train_dataset = train_dataset.filter(lambda x: x["lengths"] <= 512, num_proc=training_args.dataset_num_proc)
        if eval_dataset is not None:
            eval_dataset = eval_dataset.filter(lambda x: x["lengths"] <= 512, num_proc=training_args.dataset_num_proc)

    assert train_dataset[0]["input_ids"][-1] != tokenizer.eos_token_id, "The last token should not be an EOS token"

    ################
    # Training
    ################
    trainer = PPOTrainer(
        args=training_args,
        processing_class=tokenizer,
        model=policy,
        ref_model=ref_policy,
        reward_model=reward_model,
        value_model=value_model,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        peft_config=peft_config,
    )
    trainer.train()

    # Save and push to hub
    trainer.save_model(training_args.output_dir)
    if training_args.push_to_hub:
        trainer.push_to_hub(dataset_name=script_args.dataset_name)

    trainer.generate_completions()
trl/examples/scripts/ppo/ppo_tldr.py/0
{ "file_path": "trl/examples/scripts/ppo/ppo_tldr.py", "repo_id": "trl", "token_count": 2781 }
612
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This script generates tiny models used in the TRL library for unit tests. It pushes them to the Hub under the # `trl-internal-testing` organization. # This script is meant to be run when adding new tiny model to the TRL library. import torch from huggingface_hub import HfApi, ModelCard from torch import nn from transformers import ( AutoConfig, AutoProcessor, AutoTokenizer, BartConfig, BartModel, BloomConfig, BloomForCausalLM, CohereConfig, CohereForCausalLM, DbrxConfig, DbrxForCausalLM, DeepseekV3Config, DeepseekV3ForCausalLM, FalconMambaConfig, FalconMambaForCausalLM, Gemma2Config, Gemma2ForCausalLM, Gemma3ForConditionalGeneration, GemmaConfig, GemmaForCausalLM, GPT2Config, GPT2LMHeadModel, GPTNeoXConfig, GPTNeoXForCausalLM, GptOssConfig, GptOssForCausalLM, Idefics2Config, Idefics2ForConditionalGeneration, Idefics3ForConditionalGeneration, InternVLForConditionalGeneration, LlamaConfig, LlamaForCausalLM, LlamaForSequenceClassification, LlavaForConditionalGeneration, LlavaNextForConditionalGeneration, MistralConfig, MistralForCausalLM, OPTConfig, OPTForCausalLM, PaliGemmaForConditionalGeneration, Phi3Config, Phi3ForCausalLM, Qwen2_5_VLConfig, Qwen2_5_VLForConditionalGeneration, Qwen2Config, Qwen2ForCausalLM, Qwen2ForSequenceClassification, Qwen2VLConfig, Qwen2VLForConditionalGeneration, Qwen3Config, Qwen3ForCausalLM, Qwen3ForSequenceClassification, Qwen3MoeConfig, 
Qwen3MoeForCausalLM, SmolVLMForConditionalGeneration, T5Config, T5ForConditionalGeneration, ) ORGANIZATION = "trl-internal-testing" MODEL_CARD = """ --- library_name: transformers tags: [trl] --- # Tiny {model_class_name} This is a minimal model built for unit tests in the [TRL](https://github.com/huggingface/trl) library. """ api = HfApi() def push_to_hub(model, tokenizer, prefix=None, suffix=None, force=False): model_class_name = model.__class__.__name__ content = MODEL_CARD.format(model_class_name=model_class_name) model_card = ModelCard(content) if prefix is not None: model_class_name = f"{prefix}-{model_class_name}" repo_id = f"{ORGANIZATION}/{model_class_name}" if suffix is not None: repo_id += f"-{suffix}" if api.repo_exists(repo_id) and not force: print(f"Model {repo_id} already exists, skipping") else: model.push_to_hub(repo_id) tokenizer.push_to_hub(repo_id) model_card.push_to_hub(repo_id) def init_weights_tiny_model(model): """ Initialize tiny test models to avoid NaNs from uninitialized weights. 
Uses safe defaults: - Linear/Conv1d: Xavier uniform (weights), zero (biases) - Embedding: Normal(0, 0.02) - LayerNorm: Ones (weights), zero (biases) Args: model: PyTorch model (modified in-place) """ for module in model.modules(): if isinstance(module, nn.Linear): # Attention/MLP projections → Xavier or Normal if module.bias is not None: nn.init.zeros_(module.bias) nn.init.xavier_uniform_(module.weight) elif isinstance(module, nn.Embedding): # Token embeddings → GPT-style Normal nn.init.normal_(module.weight, mean=0.0, std=0.02) elif isinstance(module, nn.LayerNorm): # LayerNorm weights always 1, bias 0 nn.init.ones_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv1d): # Convolutional layers → Xavier or Normal if module.bias is not None: nn.init.zeros_(module.bias) nn.init.xavier_uniform_(module.weight) # Decoder models for model_id, config_class, model_class, suffix in [ ("bigscience/bloomz-560m", BloomConfig, BloomForCausalLM, None), ("CohereForAI/aya-expanse-8b", CohereConfig, CohereForCausalLM, None), ("databricks/dbrx-instruct", DbrxConfig, DbrxForCausalLM, None), ("deepseek-ai/DeepSeek-R1", DeepseekV3Config, DeepseekV3ForCausalLM, None), # It's important to have R1-0528 as it doesn't have the same chat template ("deepseek-ai/DeepSeek-R1-0528", DeepseekV3Config, DeepseekV3ForCausalLM, "0528"), ("tiiuae/falcon-7b-instruct", FalconMambaConfig, FalconMambaForCausalLM, None), ("google/gemma-2-2b-it", Gemma2Config, Gemma2ForCausalLM, None), ("google/gemma-7b-it", GemmaConfig, GemmaForCausalLM, None), ("openai-community/gpt2", GPT2Config, GPT2LMHeadModel, None), ("EleutherAI/pythia-14m", GPTNeoXConfig, GPTNeoXForCausalLM, None), ("meta-llama/Meta-Llama-3-8B-Instruct", LlamaConfig, LlamaForCausalLM, "3"), ("meta-llama/Llama-3.1-8B-Instruct", LlamaConfig, LlamaForCausalLM, "3.1"), ("meta-llama/Llama-3.2-1B-Instruct", LlamaConfig, LlamaForCausalLM, "3.2"), ("mistralai/Mistral-7B-Instruct-v0.1", MistralConfig, 
MistralForCausalLM, "0.1"), ("mistralai/Mistral-7B-Instruct-v0.2", MistralConfig, MistralForCausalLM, "0.2"), ("facebook/opt-1.3b", OPTConfig, OPTForCausalLM, None), ("microsoft/Phi-3.5-mini-instruct", Phi3Config, Phi3ForCausalLM, None), ("Qwen/Qwen2.5-32B-Instruct", Qwen2Config, Qwen2ForCausalLM, "2.5"), ("Qwen/Qwen2.5-Coder-0.5B", Qwen2Config, Qwen2ForCausalLM, "2.5-Coder"), ("Qwen/Qwen3-8B", Qwen3Config, Qwen3ForCausalLM, None), ]: revision = "refs/pr/14" if model_id == "Qwen/Qwen3-8B" else "main" # chat template with {% generation %} tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision) config = config_class( vocab_size=len(tokenizer.vocab), hidden_size=8, num_attention_heads=4, num_key_value_heads=2, num_hidden_layers=2, intermediate_size=32, ) model = model_class(config) init_weights_tiny_model(model) push_to_hub(model, tokenizer, "tiny", suffix) # MoE models for model_id, config_class, model_class, suffix in [ ("Qwen/Qwen3-30B-A3B", Qwen3MoeConfig, Qwen3MoeForCausalLM, None), ("openai/gpt-oss-20b", GptOssConfig, GptOssForCausalLM, None), ]: tokenizer = AutoTokenizer.from_pretrained(model_id) config = config_class( vocab_size=len(tokenizer.vocab), hidden_size=8, num_attention_heads=4, num_key_value_heads=2, num_hidden_layers=2, intermediate_size=32, num_experts=4, num_experts_per_tok=2, ) model = model_class(config) init_weights_tiny_model(model) push_to_hub(model, tokenizer, "tiny", suffix) # Two slightly bigger models, required for vLLM testing tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-32B-Instruct") config = Qwen2Config( vocab_size=len(tokenizer.vocab), hidden_size=128, # increase hidden size so that hidden_size // num_attention_heads = 32, required for vLLM num_attention_heads=4, num_key_value_heads=2, num_hidden_layers=2, intermediate_size=32, ) model = Qwen2ForCausalLM(config) push_to_hub(model, tokenizer, "small", "2.5") tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-4B") config = Qwen3Config( 
vocab_size=len(tokenizer.vocab), hidden_size=128, # increase hidden size so that hidden_size // num_attention_heads = 32, required for vLLM num_attention_heads=4, num_key_value_heads=2, num_hidden_layers=2, intermediate_size=32, ) model = Qwen3ForCausalLM(config) push_to_hub(model, tokenizer, "small") # Reward models for model_id, config_class, model_class, suffix in [ ("meta-llama/Llama-3.2-1B-Instruct", LlamaConfig, LlamaForSequenceClassification, "3.2"), ("Qwen/Qwen2.5-32B-Instruct", Qwen2Config, Qwen2ForSequenceClassification, "2.5"), ("Qwen/Qwen3-4B", Qwen3Config, Qwen3ForSequenceClassification, None), ]: tokenizer = AutoTokenizer.from_pretrained(model_id) config = config_class( vocab_size=len(tokenizer.vocab), hidden_size=8, num_attention_heads=4, num_key_value_heads=2, num_hidden_layers=2, intermediate_size=32, num_labels=1, ) model = model_class(config) push_to_hub(model, tokenizer, "tiny", suffix) # Encoder-decoder models for model_id, config_class, model_class, suffix in [ ("facebook/bart-base", BartConfig, BartModel, None), ("google/flan-t5-small", T5Config, T5ForConditionalGeneration, None), ]: tokenizer = AutoTokenizer.from_pretrained(model_id) config = config_class( vocab_size=len(tokenizer.vocab), d_model=16, encoder_layers=2, decoder_layers=2, d_kv=2, d_ff=64, num_layers=6, num_heads=8, decoder_start_token_id=0, is_encoder_decoder=True, ) model = model_class(config) push_to_hub(model, tokenizer, "tiny", suffix) # Vision Language Models for model_id, model_class in [ ("google/gemma-3-4b-it", Gemma3ForConditionalGeneration), ("google/paligemma-3b-pt-224", PaliGemmaForConditionalGeneration), ("HuggingFaceM4/idefics2-8b", Idefics2ForConditionalGeneration), ("HuggingFaceM4/Idefics3-8B-Llama3", Idefics3ForConditionalGeneration), ("HuggingFaceTB/SmolVLM2-2.2B-Instruct", SmolVLMForConditionalGeneration), ("llava-hf/llava-1.5-7b-hf", LlavaForConditionalGeneration), ("llava-hf/llava-v1.6-mistral-7b-hf", LlavaNextForConditionalGeneration), 
("OpenGVLab/InternVL3-8B-hf", InternVLForConditionalGeneration), ("Qwen/Qwen2-VL-2B-Instruct", Qwen2VLForConditionalGeneration), ("Qwen/Qwen2.5-VL-3B-Instruct", Qwen2_5_VLForConditionalGeneration), ]: processor = AutoProcessor.from_pretrained(model_id) config = AutoConfig.from_pretrained(model_id) config.text_config.num_hidden_layers = 2 config.text_config.hidden_size = 16 config.text_config.num_attention_heads = 4 config.text_config.num_key_value_heads = 2 config.vision_config.num_hidden_layers = 2 config.vision_config.hidden_size = 16 config.vision_config.num_attention_heads = 4 config.vision_config.num_key_value_heads = 2 if isinstance(config, (Qwen2VLConfig)): config.vision_config.depth = 2 if isinstance(config, (Qwen2VLConfig, Qwen2_5_VLConfig)): config.text_config.rope_scaling["mrope_section"] = [2] if isinstance(config, (Qwen2_5_VLConfig)): config.vision_config.out_hidden_size = 16 if isinstance(config, Idefics2Config): config.perceiver_config.hidden_size = 16 model = model_class(config).to(dtype=torch.bfloat16) push_to_hub(model, processor, "tiny")
trl/scripts/generate_tiny_models.py/0
{ "file_path": "trl/scripts/generate_tiny_models.py", "repo_id": "trl", "token_count": 4792 }
613
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from torch import nn from transformers import AutoModelForCausalLM from transformers.testing_utils import require_peft, require_torch_accelerator, torch_device from transformers.utils import is_peft_available from trl.models.activation_offloading import NoOpManager, OffloadActivations from .testing_utils import TrlTestCase if is_peft_available(): from peft import LoraConfig, get_peft_model class TestActivationOffloading(TrlTestCase): @require_torch_accelerator @require_peft def test_offloading_with_peft_models(self) -> None: """Test that activation offloading works with PEFT models.""" model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" model = AutoModelForCausalLM.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig( lora_alpha=16, lora_dropout=0.1, r=8, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, peft_config) inp = torch.randint(0, 100, (2, 10), device=torch_device) # First forward-backward pass without offloading torch.manual_seed(42) loss = model(inp, labels=inp).loss loss.backward() # Store gradients - only from trainable parameters grads_original = [] for name, param in model.named_parameters(): if param.requires_grad and param.grad is not None: grads_original.append((name, param.grad.clone())) # Reset gradients for p in model.parameters(): if p.grad is not None: p.grad = None # Second forward-backward 
pass with offloading torch.manual_seed(42) with OffloadActivations(): loss_c = model(inp, labels=inp).loss loss_c.backward() # Compare gradients - only trainable parameters for name_orig, grad_orig in grads_original: for name_param, param in model.named_parameters(): if name_param == name_orig and param.requires_grad and param.grad is not None: self.assertTrue( torch.allclose(grad_orig, param.grad, rtol=1e-4, atol=1e-5), f"Gradient mismatch for {name_orig}", ) @require_torch_accelerator def test_noop_manager_with_offloading(self): model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" model = AutoModelForCausalLM.from_pretrained(model_id).to(torch_device) inp = torch.randint(0, 100, (2, 10), device=torch_device) # Run with offloading but disable for specific section with OffloadActivations(): # First forward-backward with normal offloading torch.manual_seed(42) out1 = model(inp, labels=inp) out1.loss.backward() grads1 = [p.grad.clone() for p in model.parameters()] # Reset grads for p in model.parameters(): p.grad = None # Second forward-backward with NoOpManager with NoOpManager(): torch.manual_seed(42) out2 = model(inp, labels=inp) out2.loss.backward() grads2 = [p.grad.clone() for p in model.parameters()] # Gradients should match as NoOpManager should have prevented offloading for g1, g2 in zip(grads1, grads2): self.assertTrue(torch.allclose(g1, g2, rtol=1e-4, atol=1e-5)) @require_torch_accelerator def test_min_offload_size(self): """Test that tensors smaller than min_offload_size aren't offloaded""" model = nn.Sequential( nn.Linear(5, 5), # Small layer that shouldn't be offloaded nn.Linear(5, 1000), # Large layer that should be offloaded ).to(torch_device) inp = torch.randn(2, 5, device=torch_device) with OffloadActivations(min_offload_size=1000): out = model(inp) out.sum().backward() # The test passes if no errors occur, as we're mainly testing # that the logic handles both offloaded and non-offloaded tensors @require_torch_accelerator def 
test_real_hf_model(self): """Test with an actual HuggingFace model""" model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" model = AutoModelForCausalLM.from_pretrained(model_id).to(torch_device) # Create small input inp = torch.randint(0, 100, (2, 10), device=torch_device) # Baseline without offloading torch.manual_seed(42) out1 = model(inp, labels=inp).loss out1.backward() grads1 = [p.grad.clone() for p in model.parameters()] # Reset grads for p in model.parameters(): p.grad = None # With offloading with OffloadActivations(): torch.manual_seed(42) out2 = model(inp, labels=inp).loss out2.backward() grads2 = [p.grad.clone() for p in model.parameters()] # Check outputs and gradients match self.assertTrue(torch.allclose(out1, out2, rtol=1e-5)) for g1, g2 in zip(grads1, grads2): self.assertTrue(torch.allclose(g1, g2, rtol=1e-5))
trl/tests/test_activation_offloading.py/0
{ "file_path": "trl/tests/test_activation_offloading.py", "repo_id": "trl", "token_count": 2572 }
614
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import partial import torch from datasets import Dataset from parameterized import parameterized from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, TrainingArguments from trl import IterativeSFTTrainer from .testing_utils import TrlTestCase class IterativeTrainerTester(TrlTestCase): def setUp(self): super().setUp() self.model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" self.model = AutoModelForCausalLM.from_pretrained(self.model_id) self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) self.tokenizer.pad_token = self.tokenizer.eos_token # get t5 as seq2seq example: model_id = "trl-internal-testing/tiny-T5ForConditionalGeneration" self.t5_model = AutoModelForSeq2SeqLM.from_pretrained(model_id) self.t5_tokenizer = AutoTokenizer.from_pretrained(model_id) def _init_tensor_dummy_dataset(self): dummy_dataset_dict = { "input_ids": [ torch.tensor([5303, 3621, 3666, 1438, 318]), torch.tensor([3666, 1438, 318, 3666, 1438, 318]), torch.tensor([5303, 3621, 3666, 1438, 318]), ], "attention_mask": [ torch.tensor([1, 1, 1, 1, 1]), torch.tensor([1, 1, 1, 1, 1, 1]), torch.tensor([1, 1, 1, 1, 1]), ], "labels": [ torch.tensor([5303, 3621, 3666, 1438, 318]), torch.tensor([3666, 1438, 318, 3666, 1438, 318]), torch.tensor([5303, 3621, 3666, 1438, 318]), ], } dummy_dataset = Dataset.from_dict(dummy_dataset_dict) 
dummy_dataset.set_format("torch") return dummy_dataset def _init_textual_dummy_dataset(self): dummy_dataset_dict = { "texts": ["Testing the IterativeSFTTrainer.", "This is a test of the IterativeSFTTrainer"], "texts_labels": ["Testing the IterativeSFTTrainer.", "This is a test of the IterativeSFTTrainer"], } dummy_dataset = Dataset.from_dict(dummy_dataset_dict) dummy_dataset.set_format("torch") return dummy_dataset @parameterized.expand( [ ["qwen", "tensor"], ["qwen", "text"], ["t5", "tensor"], ["t5", "text"], ] ) def test_iterative_step_from_tensor(self, model_name, input_name): # initialize dataset if input_name == "tensor": dummy_dataset = self._init_tensor_dummy_dataset() inputs = { "input_ids": dummy_dataset["input_ids"], "attention_mask": dummy_dataset["attention_mask"], "labels": dummy_dataset["labels"], } else: dummy_dataset = self._init_textual_dummy_dataset() inputs = { "texts": dummy_dataset["texts"], "texts_labels": dummy_dataset["texts_labels"], } if model_name == "qwen": model = self.model tokenizer = self.tokenizer else: model = self.t5_model tokenizer = self.t5_tokenizer training_args = TrainingArguments( output_dir=self.tmp_dir, per_device_train_batch_size=2, max_steps=2, learning_rate=1e-3, report_to="none", ) iterative_trainer = IterativeSFTTrainer(model=model, args=training_args, processing_class=tokenizer) iterative_trainer.optimizer.zero_grad = partial(iterative_trainer.optimizer.zero_grad, set_to_none=False) iterative_trainer.step(**inputs) for param in iterative_trainer.model.parameters(): self.assertIsNotNone(param.grad)
trl/tests/test_iterative_sft_trainer.py/0
{ "file_path": "trl/tests/test_iterative_sft_trainer.py", "repo_id": "trl", "token_count": 2049 }
615
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from datasets import load_dataset from parameterized import parameterized from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer from trl import ( BCOConfig, BCOTrainer, CPOConfig, CPOTrainer, DPOConfig, DPOTrainer, KTOConfig, KTOTrainer, NashMDConfig, NashMDTrainer, OnlineDPOConfig, OnlineDPOTrainer, ORPOConfig, ORPOTrainer, RewardConfig, RewardTrainer, SFTConfig, SFTTrainer, XPOConfig, XPOTrainer, ) from .testing_utils import TrlTestCase, require_sklearn class TrainerArgTester(TrlTestCase): @require_sklearn def test_bco(self): model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" tokenizer = AutoTokenizer.from_pretrained(model_id) dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference", split="train") training_args = BCOConfig( self.tmp_dir, max_length=256, max_prompt_length=64, max_completion_length=64, beta=0.5, label_pad_token_id=-99, padding_value=-99, truncation_mode="keep_start", # generate_during_eval=True, # ignore this one, it requires wandb is_encoder_decoder=True, precompute_ref_log_probs=True, model_init_kwargs={"trust_remote_code": True}, ref_model_init_kwargs={"trust_remote_code": True}, dataset_num_proc=4, prompt_sample_size=512, min_density_ratio=0.2, max_density_ratio=20.0, ) trainer = BCOTrainer( model=model_id, ref_model=model_id, args=training_args, train_dataset=dataset, 
processing_class=tokenizer, ) self.assertEqual(trainer.args.max_length, 256) self.assertEqual(trainer.args.max_prompt_length, 64) self.assertEqual(trainer.args.max_completion_length, 64) self.assertEqual(trainer.args.beta, 0.5) self.assertEqual(trainer.args.label_pad_token_id, -99) self.assertEqual(trainer.args.padding_value, -99) self.assertEqual(trainer.args.truncation_mode, "keep_start") # self.assertEqual(trainer.args.generate_during_eval, True) self.assertEqual(trainer.args.is_encoder_decoder, True) self.assertEqual(trainer.args.precompute_ref_log_probs, True) self.assertEqual(trainer.args.model_init_kwargs, {"trust_remote_code": True}) self.assertEqual(trainer.args.ref_model_init_kwargs, {"trust_remote_code": True}) self.assertEqual(trainer.args.dataset_num_proc, 4) self.assertEqual(trainer.args.prompt_sample_size, 512) self.assertEqual(trainer.args.min_density_ratio, 0.2) self.assertEqual(trainer.args.max_density_ratio, 20.0) def test_cpo(self): model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" tokenizer = AutoTokenizer.from_pretrained(model_id) dataset = load_dataset("trl-internal-testing/zen", "standard_preference", split="train") training_args = CPOConfig( self.tmp_dir, max_length=256, max_prompt_length=64, max_completion_length=64, beta=0.5, label_smoothing=0.5, loss_type="hinge", disable_dropout=False, cpo_alpha=0.5, simpo_gamma=0.2, label_pad_token_id=-99, padding_value=-99, truncation_mode="keep_start", # generate_during_eval=True, # ignore this one, it requires wandb is_encoder_decoder=True, model_init_kwargs={"trust_remote_code": True}, dataset_num_proc=4, ) trainer = CPOTrainer(model=model_id, args=training_args, train_dataset=dataset, processing_class=tokenizer) self.assertEqual(trainer.args.max_length, 256) self.assertEqual(trainer.args.max_prompt_length, 64) self.assertEqual(trainer.args.max_completion_length, 64) self.assertEqual(trainer.args.beta, 0.5) self.assertEqual(trainer.args.label_smoothing, 0.5) 
self.assertEqual(trainer.args.loss_type, "hinge") self.assertEqual(trainer.args.disable_dropout, False) self.assertEqual(trainer.args.cpo_alpha, 0.5) self.assertEqual(trainer.args.simpo_gamma, 0.2) self.assertEqual(trainer.args.label_pad_token_id, -99) self.assertEqual(trainer.args.padding_value, -99) self.assertEqual(trainer.args.truncation_mode, "keep_start") # self.assertEqual(trainer.args.generate_during_eval, True) self.assertEqual(trainer.args.is_encoder_decoder, True) self.assertEqual(trainer.args.model_init_kwargs, {"trust_remote_code": True}) self.assertEqual(trainer.args.dataset_num_proc, 4) def test_dpo(self): model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" tokenizer = AutoTokenizer.from_pretrained(model_id) dataset = load_dataset("trl-internal-testing/zen", "standard_preference", split="train") training_args = DPOConfig( self.tmp_dir, beta=0.5, label_smoothing=0.5, loss_type="hinge", label_pad_token_id=-99, padding_value=-99, truncation_mode="keep_start", max_length=256, max_prompt_length=64, max_completion_length=64, disable_dropout=False, # generate_during_eval=True, # ignore this one, it requires wandb precompute_ref_log_probs=True, dataset_num_proc=4, model_init_kwargs={"trust_remote_code": True}, ref_model_init_kwargs={"trust_remote_code": True}, model_adapter_name="dummy_adapter", ref_adapter_name="dummy_adapter", reference_free=True, force_use_ref_model=True, f_divergence_type="js_divergence", f_alpha_divergence_coef=0.5, # sync_ref_model=True, # cannot be True when precompute_ref_log_probs=True. Don't test this. 
ref_model_mixup_alpha=0.5, ref_model_sync_steps=32, rpo_alpha=0.5, discopop_tau=0.1, ) trainer = DPOTrainer( model=model_id, ref_model=model_id, args=training_args, train_dataset=dataset, processing_class=tokenizer, ) self.assertEqual(trainer.args.beta, 0.5) self.assertEqual(trainer.args.label_smoothing, 0.5) self.assertEqual(trainer.args.loss_type, "hinge") self.assertEqual(trainer.args.label_pad_token_id, -99) self.assertEqual(trainer.args.padding_value, -99) self.assertEqual(trainer.args.truncation_mode, "keep_start") self.assertEqual(trainer.args.max_length, 256) self.assertEqual(trainer.args.max_prompt_length, 64) self.assertEqual(trainer.args.max_completion_length, 64) self.assertEqual(trainer.args.disable_dropout, False) # self.assertEqual(trainer.args.generate_during_eval, True) self.assertEqual(trainer.args.precompute_ref_log_probs, True) self.assertEqual(trainer.args.dataset_num_proc, 4) self.assertEqual(trainer.args.model_init_kwargs, {"trust_remote_code": True}) self.assertEqual(trainer.args.ref_model_init_kwargs, {"trust_remote_code": True}) self.assertEqual(trainer.args.model_adapter_name, "dummy_adapter") self.assertEqual(trainer.args.ref_adapter_name, "dummy_adapter") self.assertEqual(trainer.args.reference_free, True) self.assertEqual(trainer.args.force_use_ref_model, True) self.assertEqual(trainer.args.f_divergence_type, "js_divergence") self.assertEqual(trainer.args.f_alpha_divergence_coef, 0.5) # self.assertEqual(trainer.args.sync_ref_model, True) self.assertEqual(trainer.args.ref_model_mixup_alpha, 0.5) self.assertEqual(trainer.args.ref_model_sync_steps, 32) self.assertEqual(trainer.args.rpo_alpha, 0.5) self.assertEqual(trainer.args.discopop_tau, 0.1) def test_kto(self): model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" tokenizer = AutoTokenizer.from_pretrained(model_id) dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference", split="train") training_args = KTOConfig( self.tmp_dir, max_length=256, 
max_prompt_length=64, max_completion_length=64, beta=0.5, desirable_weight=0.5, undesirable_weight=0.5, label_pad_token_id=-99, padding_value=-99, truncation_mode="keep_start", # generate_during_eval=True, # ignore this one, it requires wandb is_encoder_decoder=True, precompute_ref_log_probs=True, model_init_kwargs={"trust_remote_code": True}, ref_model_init_kwargs={"trust_remote_code": True}, dataset_num_proc=4, ) trainer = KTOTrainer( model=model_id, ref_model=model_id, args=training_args, train_dataset=dataset, processing_class=tokenizer, ) self.assertEqual(trainer.args.max_length, 256) self.assertEqual(trainer.args.max_prompt_length, 64) self.assertEqual(trainer.args.max_completion_length, 64) self.assertEqual(trainer.args.beta, 0.5) self.assertEqual(trainer.args.desirable_weight, 0.5) self.assertEqual(trainer.args.undesirable_weight, 0.5) self.assertEqual(trainer.args.label_pad_token_id, -99) self.assertEqual(trainer.args.padding_value, -99) self.assertEqual(trainer.args.truncation_mode, "keep_start") # self.assertEqual(trainer.args.generate_during_eval, True) self.assertEqual(trainer.args.is_encoder_decoder, True) self.assertEqual(trainer.args.precompute_ref_log_probs, True) self.assertEqual(trainer.args.model_init_kwargs, {"trust_remote_code": True}) self.assertEqual(trainer.args.ref_model_init_kwargs, {"trust_remote_code": True}) self.assertEqual(trainer.args.dataset_num_proc, 4) @parameterized.expand([(False,), (True,)]) def test_nash_md(self, mixtures_coef_list): model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) ref_model = AutoModelForCausalLM.from_pretrained(model_id) reward_model = AutoModelForSequenceClassification.from_pretrained(model_id, num_labels=1) dataset = load_dataset("trl-internal-testing/zen", "standard_prompt_only", split="train") training_args = NashMDConfig( self.tmp_dir, mixture_coef=0.5 if not mixtures_coef_list else 
[0.5, 0.6], ) trainer = NashMDTrainer( args=training_args, processing_class=tokenizer, model=model, ref_model=ref_model, reward_model=reward_model, train_dataset=dataset, ) self.assertEqual(trainer.args.mixture_coef, 0.5 if not mixtures_coef_list else [0.5, 0.6]) @parameterized.expand([(False,), (True,)]) def test_online_dpo(self, beta_list): model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) ref_model = AutoModelForCausalLM.from_pretrained(model_id) reward_model = AutoModelForSequenceClassification.from_pretrained(model_id, num_labels=1) dataset = load_dataset("trl-internal-testing/zen", "standard_prompt_only", split="train") training_args = OnlineDPOConfig( self.tmp_dir, max_new_tokens=42, temperature=0.5, missing_eos_penalty=0.33, beta=0.6 if not beta_list else [0.6, 0.7], loss_type="hinge", dataset_num_proc=4, ) trainer = OnlineDPOTrainer( model=model, ref_model=ref_model, reward_model=reward_model, args=training_args, train_dataset=dataset, processing_class=tokenizer, reward_processing_class=tokenizer, ) self.assertEqual(trainer.args.max_new_tokens, 42) self.assertEqual(trainer.args.temperature, 0.5) self.assertEqual(trainer.args.missing_eos_penalty, 0.33) self.assertEqual(trainer.args.beta, 0.6 if not beta_list else [0.6, 0.7]) self.assertEqual(trainer.args.loss_type, "hinge") self.assertEqual(trainer.args.dataset_num_proc, 4) def test_orpo(self): model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" tokenizer = AutoTokenizer.from_pretrained(model_id) dataset = load_dataset("trl-internal-testing/zen", "standard_preference", split="train") training_args = ORPOConfig( self.tmp_dir, max_length=256, max_prompt_length=64, max_completion_length=64, beta=0.5, disable_dropout=False, label_pad_token_id=-99, padding_value=-99, truncation_mode="keep_start", # generate_during_eval=True, # ignore this one, it requires wandb is_encoder_decoder=True, 
model_init_kwargs={"trust_remote_code": True}, dataset_num_proc=4, ) trainer = ORPOTrainer(model=model_id, args=training_args, train_dataset=dataset, processing_class=tokenizer) self.assertEqual(trainer.args.max_length, 256) self.assertEqual(trainer.args.max_prompt_length, 64) self.assertEqual(trainer.args.max_completion_length, 64) self.assertEqual(trainer.args.beta, 0.5) self.assertEqual(trainer.args.disable_dropout, False) self.assertEqual(trainer.args.label_pad_token_id, -99) def test_reward(self): model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) dataset = load_dataset("trl-internal-testing/zen", "standard_preference", split="train") training_args = RewardConfig( self.tmp_dir, max_length=256, dataset_num_proc=4, center_rewards_coefficient=0.1, ) trainer = RewardTrainer( model=model, args=training_args, train_dataset=dataset, processing_class=tokenizer, ) self.assertEqual(trainer.args.max_length, 256) self.assertEqual(trainer.args.dataset_num_proc, 4) self.assertEqual(trainer.args.center_rewards_coefficient, 0.1) def test_sft(self): model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" dataset = load_dataset("trl-internal-testing/zen", "standard_language_modeling", split="train") training_args = SFTConfig( self.tmp_dir, dataset_text_field="dummy_text_field", packing=True, max_length=256, dataset_num_proc=4, neftune_noise_alpha=0.1, model_init_kwargs={"trust_remote_code": True}, dataset_kwargs={"append_concat_token": True, "skip_prepare_dataset": True}, eval_packing=True, ) trainer = SFTTrainer(model_id, args=training_args, train_dataset=dataset) self.assertEqual(trainer.args.dataset_text_field, "dummy_text_field") self.assertEqual(trainer.args.packing, True) self.assertEqual(trainer.args.max_length, 256) self.assertEqual(trainer.args.dataset_num_proc, 4) self.assertEqual(trainer.args.neftune_noise_alpha, 0.1) 
self.assertEqual(trainer.args.model_init_kwargs, {"trust_remote_code": True}) self.assertIn("append_concat_token", trainer.args.dataset_kwargs) self.assertEqual(trainer.args.dataset_kwargs["append_concat_token"], True) self.assertEqual(trainer.args.eval_packing, True) @parameterized.expand([(False,), (True,)]) def test_xpo(self, alpha_list): model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) ref_model = AutoModelForCausalLM.from_pretrained(model_id) reward_model = AutoModelForSequenceClassification.from_pretrained(model_id, num_labels=1) dataset = load_dataset("trl-internal-testing/zen", "standard_prompt_only", split="train") training_args = XPOConfig( self.tmp_dir, alpha=0.5 if not alpha_list else [0.5, 0.6], ) trainer = XPOTrainer( args=training_args, processing_class=tokenizer, model=model, ref_model=ref_model, reward_model=reward_model, train_dataset=dataset, ) self.assertEqual(trainer.args.alpha, 0.5 if not alpha_list else [0.5, 0.6])
trl/tests/test_trainers_args.py/0
{ "file_path": "trl/tests/test_trainers_args.py", "repo_id": "trl", "token_count": 8553 }
616
def prepare_multimodal_messages(messages: list[dict[str, Any]], num_images: int) -> None:
    """
    Convert messages into a structured multimodal format, in place, if needed.

    Each message whose content is still a raw string is rewritten as a list of typed parts. The first such user
    message is prefixed with `num_images` image placeholders; every other user, assistant, or system message is
    wrapped as a single text entry. Messages whose content is already a list are left untouched.

    Args:
        messages (`list[dict[str, Any]]`):
            Messages with `"role"` and `"content"`. Content may be a raw string before transformation.
        num_images (`int`):
            Number of image placeholders to insert into the first user message.

    Raises:
        ValueError: If a message's role is not `"user"`, `"assistant"`, or `"system"`.

    Example:
    ```python
    # Input
    [
        {"role": "user", "content": "What's in this image?"},
        {"role": "assistant", "content": "It looks like a cat."},
    ]

    # Output (num_images=1)
    [
        {"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "What's in this image?"}]},
        {"role": "assistant", "content": [{"type": "text", "text": "It looks like a cat."}]},
    ]
    ```
    """
    images_pending = True  # placeholders go into the first raw-string user message only
    for message in messages:
        role = message["role"]
        if role == "system":
            # If already prepared, the content will be a list; leave it alone.
            if isinstance(message["content"], str):
                message["content"] = [{"type": "text", "text": message["content"]}]
        elif role == "user":
            if isinstance(message["content"], str):
                if images_pending:
                    prefix = [{"type": "image"}] * num_images
                    message["content"] = prefix + [{"type": "text", "text": message["content"]}]
                    images_pending = False
                else:
                    message["content"] = [{"type": "text", "text": message["content"]}]
        elif role == "assistant":
            if isinstance(message["content"], str):
                message["content"] = [{"type": "text", "text": message["content"]}]
        else:
            raise ValueError(f"Invalid role in message: {message['role']}. Expected 'user', 'assistant', or 'system'.")
def apply_chat_template(
    example: dict[str, list[dict[str, str]]],
    tokenizer: PreTrainedTokenizerBase,
    tools: Optional[list[Union[dict, Callable]]] = None,
    **template_kwargs,
) -> dict[str, str]:
    r"""
    Apply a chat template to a conversational example along with the schema for a list of functions in `tools`.

    For more details, see [`maybe_apply_chat_template`].

    Args:
        example (`dict[str, list[dict[str, str]]]`):
            Conversational example. Must contain exactly one of the supported key combinations
            (see the `KeyError` below): `{"messages"}`, `{"prompt"}`, `{"prompt", "completion"}`,
            `{"prompt", "chosen", "rejected"}`, `{"chosen", "rejected"}`, or
            `{"prompt", "completion", "label"}`.
        tokenizer (`PreTrainedTokenizerBase`):
            Tokenizer whose chat template is applied.
        tools (`list[Union[dict, Callable]]` or `None`, *optional*, defaults to `None`):
            Tool schemas forwarded to the tokenizer's template.
        **template_kwargs:
            Extra keyword arguments forwarded to `tokenizer.apply_chat_template`.

    Returns:
        `dict[str, str]`: The templated strings, keyed as the input (except `"messages"`, which becomes `"text"`).

    Raises:
        KeyError: If the example's supported keys do not match one of the allowed combinations.
        ValueError: If the last prompt message's role is neither `"user"` nor `"assistant"`.
    """
    # Check that the example has the correct keys
    supported_keys = ["prompt", "chosen", "rejected", "completion", "messages", "label"]
    example_keys = {key for key in example.keys() if key in supported_keys}
    if example_keys not in [
        {"messages"},  # language modeling
        {"prompt"},  # prompt-only
        {"prompt", "completion"},  # prompt-completion
        {"prompt", "chosen", "rejected"},  # preference
        {"chosen", "rejected"},  # preference with implicit prompt
        {"prompt", "completion", "label"},  # unpaired preference
    ]:
        raise KeyError(f"Invalid keys in the example: {example_keys}")

    # Apply the chat template to the whole conversation
    if "messages" in example:
        messages = tokenizer.apply_chat_template(example["messages"], tools=tools, tokenize=False, **template_kwargs)

    # Apply the chat template to the prompt, adding the generation prompt when the last message is a user turn,
    # or continuing the final message when it is an assistant turn.
    if "prompt" in example:
        last_role = example["prompt"][-1]["role"]
        if last_role == "user":
            add_generation_prompt = True
            continue_final_message = False
        elif last_role == "assistant":
            add_generation_prompt = False
            continue_final_message = True
        else:
            raise ValueError(f"Invalid role in the last message: {last_role}")
        prompt = tokenizer.apply_chat_template(
            example["prompt"],
            tools=tools,
            continue_final_message=continue_final_message,
            tokenize=False,
            add_generation_prompt=add_generation_prompt,
            **template_kwargs,
        )

    # Apply the chat template to the entire prompt + completion
    if "prompt" in example:  # explicit prompt and prompt-completion case
        if "chosen" in example:
            prompt_chosen = tokenizer.apply_chat_template(
                example["prompt"] + example["chosen"], tools=tools, tokenize=False, **template_kwargs
            )
            # DeepSeek-R1 inserts a <tool_call> token when using `add_generation_prompt`, which can cause discrepancies
            # between the prompt alone and the combined prompt+completion. To ensure consistency, we extract the
            # common prefix between the two. In most cases, this is a no-op.
            prompt = "".join(x for x, _ in takewhile(lambda x: x[0] == x[1], zip(prompt, prompt_chosen)))
            chosen = prompt_chosen[len(prompt) :]
        if "rejected" in example and "prompt" in example:  # explicit prompt
            prompt_rejected = tokenizer.apply_chat_template(
                example["prompt"] + example["rejected"], tools=tools, tokenize=False, **template_kwargs
            )
            # Handle DeepSeek-R1 <tool_call> token, see the above comment for details
            prompt = "".join(x for x, _ in takewhile(lambda x: x[0] == x[1], zip(prompt, prompt_rejected)))
            rejected = prompt_rejected[len(prompt) :]
        if "completion" in example:
            prompt_completion = tokenizer.apply_chat_template(
                example["prompt"] + example["completion"], tools=tools, tokenize=False, **template_kwargs
            )
            # Handle DeepSeek-R1 <tool_call> token, see the above comment for details
            prompt = "".join(x for x, _ in takewhile(lambda x: x[0] == x[1], zip(prompt, prompt_completion)))
            completion = prompt_completion[len(prompt) :]
    else:  # implicit prompt case: template the completions as whole conversations
        if "chosen" in example:
            chosen = tokenizer.apply_chat_template(example["chosen"], tools=tools, tokenize=False, **template_kwargs)
        if "rejected" in example:
            rejected = tokenizer.apply_chat_template(
                example["rejected"], tools=tools, tokenize=False, **template_kwargs
            )

    # Assemble the output; each local above is bound exactly when its key is present, so these guards are safe.
    # The completion strings were extracted by removing the prompt part from the prompt-completion string.
    output = {}
    if "messages" in example:
        output["text"] = messages
    if "prompt" in example:
        output["prompt"] = prompt
    if "chosen" in example:
        output["chosen"] = chosen
    if "rejected" in example:
        output["rejected"] = rejected
    if "completion" in example:
        output["completion"] = completion
    if "label" in example:
        output["label"] = example["label"]

    return output
Args: example (`dict[str, list[dict[str, str]]`): Dictionary representing a single data entry of a conversational dataset. Each data entry can have different keys depending on the dataset type. The supported dataset types are: - Language modeling dataset: `"messages"`. - Prompt-only dataset: `"prompt"`. - Prompt-completion dataset: `"prompt"` and `"completion"`. - Preference dataset: `"prompt"`, `"chosen"`, and `"rejected"`. - Preference dataset with implicit prompt: `"chosen"` and `"rejected"`. - Unpaired preference dataset: `"prompt"`, `"completion"`, and `"label"`. For keys `"messages"`, `"prompt"`, `"chosen"`, `"rejected"`, and `"completion"`, the values are lists of messages, where each message is a dictionary with keys `"role"` and `"content"`. tokenizer (`PreTrainedTokenizerBase`): Tokenizer to apply the chat template with. tools (`list[Union[dict, Callable]]` or `None`, *optional*, defaults to `None`): A list of tools (callable functions) that will be accessible to the model. If the template does not support function calling, this argument will have no effect. **template_kwargs (`Any`, *optional*): Additional kwargs to pass to the template renderer. Will be accessible by the chat template. Returns: `dict[str, str]`: Formatted example with the chat template applied. Notes: - This function does not alter the keys, except for Language modeling dataset, where `"messages"` is replaced by `"text"`. - In case of prompt-only data, if the last role is `"user"`, the generation prompt is added to the prompt. Else, if the last role is `"assistant"`, the final message is continued. Example: ```python >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-128k-instruct") >>> example = { ... "prompt": [{"role": "user", "content": "What color is the sky?"}], ... "completion": [{"role": "assistant", "content": "It is blue."}], ... 
} >>> apply_chat_template(example, tokenizer) {'prompt': '<|user|>\nWhat color is the sky?<|end|>\n<|assistant|>\n', 'completion': 'It is blue.<|end|>\n'} ``` """ if is_conversational(example): return apply_chat_template(example, tokenizer, tools, **template_kwargs) else: return example def _unpair_row(examples: list[dict[str, list[dict[str, str]]]]) -> list[dict[str, list[dict[str, str]]]]: batch_size = len(examples["chosen"]) new_rows = { "completion": examples["chosen"] + examples["rejected"], "label": [True] * batch_size + [False] * batch_size, } if "prompt" in examples: new_rows["prompt"] = examples["prompt"] + examples["prompt"] return new_rows def unpair_preference_dataset( dataset: DatasetType, num_proc: Optional[int] = None, desc: Optional[str] = None ) -> DatasetType: r""" Unpair a preference dataset. Args: dataset (`Dataset` or `DatasetDict`): Preference dataset to unpair. The dataset must have columns `"chosen"`, `"rejected"` and optionally `"prompt"`. num_proc (`int` or `None`, *optional*, defaults to `None`): Number of processes to use for processing the dataset. desc (`str` or `None`, *optional*, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while mapping examples. Returns: `Dataset`: The unpaired preference dataset. Example: ```python >>> from datasets import Dataset >>> dataset_dict = { ... "prompt": ["The sky is", "The sun is"], ... "chosen": [" blue.", "in the sky."], ... "rejected": [" green.", " in the sea."], ... 
} >>> dataset = Dataset.from_dict(dataset_dict) >>> dataset = unpair_preference_dataset(dataset) >>> dataset Dataset({ features: ['prompt', 'completion', 'label'], num_rows: 4 }) >>> dataset[0] {'prompt': 'The sky is', 'completion': ' blue.', 'label': True} ``` """ return dataset.map(_unpair_row, batched=True, remove_columns=["chosen", "rejected"], num_proc=num_proc, desc=desc) def maybe_unpair_preference_dataset( dataset: DatasetType, num_proc: Optional[int] = None, desc: Optional[str] = None ) -> DatasetType: r""" Unpair a preference dataset if it is paired. Args: dataset (`Dataset` or `DatasetDict`): Preference dataset to unpair. The dataset must have columns `"chosen"`, `"rejected"` and optionally `"prompt"`. num_proc (`int` or `None`, *optional*, defaults to `None`): Number of processes to use for processing the dataset. desc (`str` or `None`, *optional*, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while mapping examples. Returns: `Dataset` or `DatasetDict`: The unpaired preference dataset if it was paired, otherwise the original dataset. Example: ```python >>> from datasets import Dataset >>> dataset_dict = { ... "prompt": ["The sky is", "The sun is"], ... "chosen": [" blue.", "in the sky."], ... "rejected": [" green.", " in the sea."], ... 
} >>> dataset = Dataset.from_dict(dataset_dict) >>> dataset = unpair_preference_dataset(dataset) >>> dataset Dataset({ features: ['prompt', 'completion', 'label'], num_rows: 4 }) >>> dataset[0] {'prompt': 'The sky is', 'completion': ' blue.', 'label': True} ``` """ if isinstance(dataset, DatasetDict): column_names = dataset[list(dataset.keys())[0]].column_names else: column_names = dataset.column_names if "chosen" in column_names and "rejected" in column_names: return unpair_preference_dataset(dataset, num_proc=num_proc, desc=desc) else: return dataset def extract_prompt(example: dict[str, Sequence]) -> dict[str, Sequence]: r""" Extracts the shared prompt from a preference data example, where the prompt is implicit within both the chosen and rejected completions. For more details, see [`maybe_extract_prompt`]. """ for idx in range(min(len(example["chosen"]), len(example["rejected"]))): if example["chosen"][idx] != example["rejected"][idx]: if example["chosen"][idx - 1] == " ": # remove space before the prompt idx -= 1 break return { "prompt": example["chosen"][:idx], "chosen": example["chosen"][idx:], "rejected": example["rejected"][idx:], } def maybe_extract_prompt(example: dict[str, list]) -> dict[str, list]: r""" Extracts the shared prompt from a preference data example, where the prompt is implicit within both the chosen and rejected completions. If the example already contains a `"prompt"` key, the function returns the example as is. Else, the function identifies the longest common sequence (prefix) of conversation turns between the "chosen" and "rejected" completions and extracts this as the prompt. It then removes this prompt from the respective "chosen" and "rejected" completions. Args: example (`dict[str, list]`): A dictionary representing a single data entry in the preference dataset. It must contain the keys `"chosen"` and `"rejected"`, where each value is either conversational or standard (`str`). 
Returns: `dict[str, list]`: A dictionary containing: - `"prompt"`: The longest common prefix between the "chosen" and "rejected" completions. - `"chosen"`: The remainder of the "chosen" completion, with the prompt removed. - `"rejected"`: The remainder of the "rejected" completion, with the prompt removed. Examples: ```python >>> example = { ... "chosen": [ ... {"role": "user", "content": "What color is the sky?"}, ... {"role": "assistant", "content": "It is blue."}, ... ], ... "rejected": [ ... {"role": "user", "content": "What color is the sky?"}, ... {"role": "assistant", "content": "It is green."}, ... ], ... } >>> extract_prompt(example) {'prompt': [{'role': 'user', 'content': 'What color is the sky?'}], 'chosen': [{'role': 'assistant', 'content': 'It is blue.'}], 'rejected': [{'role': 'assistant', 'content': 'It is green.'}]} ``` Or, with the `map` method of `datasets.Dataset`: ```python >>> from trl import extract_prompt >>> from datasets import Dataset >>> dataset_dict = { ... "chosen": [ ... [ ... {"role": "user", "content": "What color is the sky?"}, ... {"role": "assistant", "content": "It is blue."}, ... ], ... [ ... {"role": "user", "content": "Where is the sun?"}, ... {"role": "assistant", "content": "In the sky."}, ... ], ... ], ... "rejected": [ ... [ ... {"role": "user", "content": "What color is the sky?"}, ... {"role": "assistant", "content": "It is green."}, ... ], ... [ ... {"role": "user", "content": "Where is the sun?"}, ... {"role": "assistant", "content": "In the sea."}, ... ], ... ], ... } >>> dataset = Dataset.from_dict(dataset_dict) >>> dataset = dataset.map(extract_prompt) >>> dataset[0] {'prompt': [{'role': 'user', 'content': 'What color is the sky?'}], 'chosen': [{'role': 'assistant', 'content': 'It is blue.'}], 'rejected': [{'role': 'assistant', 'content': 'It is green.'}]} ``` """ # Some dataset add a `"prompt"` column, even though the prompt is implicit and included in the "chosen" and # "rejected" completions. 
E.g.: # {"prompt": "What color is the sky?", # "chosen": [{"role": "user", "content": "What color is the sky?"}, {"role": "assistant", "content": "It is blue."}], # "rejected": [{"role": "user", "content": "What color is the sky?"}, {"role": "assistant", "content": "It is green."}]} # That's why we check if the prompt is also conversational before deciding not to extract it. if "chosen" not in example or "rejected" not in example: # not a preference example return example if "prompt" in example: # Both conversational or both non-conversational chosen_conv = is_conversational({"chosen": example["chosen"]}) prompt_conv = is_conversational({"prompt": example["prompt"]}) if (chosen_conv and prompt_conv) or (not chosen_conv and not prompt_conv): return example return extract_prompt({"chosen": example["chosen"], "rejected": example["rejected"]}) class _SegmentTree: """ A segment tree data structure that, when initialized as `_SegmentTree(maxval)`, efficiently finds the next larger value for a given input within the range [1, maxval]. See [Fewer Truncations Improve Language Modeling](https://arxiv.org/abs/2404.10830) for more details. 
""" def __init__(self, maxval: int): self.maxval = maxval # For non-power-of-2 values, we need to round up to the next power of 2 for the tree size self.tree_size = 1 << (maxval - 1).bit_length() self.tree = [0] * (2 * self.tree_size) def add(self, val): assert 0 < val <= self.maxval i = self.tree_size + val - 1 self.tree[i] = val while i > 1: i >>= 1 left, right = self.tree[i << 1], self.tree[(i << 1) + 1] # Compare the values using if-else otherwise repeated calls to `builtins.max` become the bottleneck self.tree[i] = left if left >= right else right def remove(self, val): assert 0 < val <= self.maxval i = self.tree_size + val - 1 self.tree[i] = 0 while i > 1: i >>= 1 left, right = self.tree[i << 1], self.tree[(i << 1) + 1] # Compare the values using if-else otherwise repeated calls to `builtins.max` become the bottleneck self.tree[i] = left if left >= right else right def search(self, val): assert 0 < val <= self.maxval i = 1 while i < self.tree_size: if self.tree[i << 1] >= val: i = i << 1 else: i = (i << 1) + 1 return self.tree[i] def _pack_bfd(examples: pa.Table, seq_length: int) -> pa.Table: """Pack sequences in a pyarrow Table using Best Fit Decreasing strategy.""" columns = [] list_column_idx = None for idx, column in enumerate(examples.columns): if pyarrow.types.is_list(column.type) or pyarrow.types.is_large_list(column.type): column = pc.list_slice(column, 0, seq_length) if list_column_idx is None: list_column_idx = idx columns.append(column) examples = pa.Table.from_arrays(columns, names=examples.column_names) ids = np.arange(len(examples)) assert list_column_idx is not None lengths = pc.list_value_length(examples[list_column_idx]).combine_chunks() examples = examples.append_column("seq_lengths", lengths) # Allows us to later construct `position_ids` lengths = pc.make_struct(lengths, ids) lengths = lengths.sort("descending", by=0) segment_tree = _SegmentTree(seq_length) segment_tree.add(seq_length) # the max, `seq_length` bin is always available 
def _pack_wrapped(examples: pa.Table, seq_length: int) -> pa.Table:
    """Pack sequences in a pyarrow Table using a wrapped strategy.

    Every list-typed column is flattened and re-sliced into windows of exactly `seq_length` elements
    (the final window may be shorter); sequence boundaries are ignored. Non-list columns pass through
    unchanged.
    """

    def _rewrap(col):
        # Flatten the list column, then lay fresh offsets every `seq_length` elements.
        if isinstance(col, pa.ChunkedArray):
            col = col.combine_chunks()
        old_offsets, flat = col.offsets, col.values
        # Restrict the value buffer to the logical range of this array (offsets may not start at 0).
        flat = flat[old_offsets[0].as_py() : old_offsets[-1].as_py()]
        total = len(flat)
        offset_dtype = old_offsets.type.to_pandas_dtype()  # np.int32 or np.int64
        new_offsets = np.concatenate((np.arange(0, total, seq_length, dtype=offset_dtype), [total]))
        return type(col).from_arrays(new_offsets, flat)

    repacked = [
        _rewrap(column)
        if pyarrow.types.is_list(column.type) or pyarrow.types.is_large_list(column.type)
        else column
        for column in examples.columns
    ]
    return pa.Table.from_arrays(repacked, names=examples.column_names)
def truncate_dataset(
    dataset: DatasetType, max_length: int, map_kwargs: Optional[dict[str, Any]] = None
) -> DatasetType:
    r"""
    Truncate sequences in a dataset to a specified `max_length`.

    Args:
        dataset (`Dataset` or `DatasetDict`):
            Dataset to truncate.
        max_length (`int`):
            Maximum sequence length to truncate to.
        map_kwargs (`dict` or `None`, *optional*, defaults to `None`):
            Additional keyword arguments to pass to the dataset's map method when truncating examples.

    Returns:
        `Dataset` or `DatasetDict`: The dataset with truncated sequences.

    Example:
    ```python
    >>> from datasets import Dataset

    >>> examples = {
    ...     "input_ids": [[1, 2, 3], [4, 5, 6, 7], [8]],
    ...     "attention_mask": [[0, 1, 1], [0, 0, 1, 1], [1]],
    ... }
    >>> dataset = Dataset.from_dict(examples)
    >>> truncated_dataset = truncate_dataset(dataset, max_length=2)
    >>> truncated_dataset[:]
    {'input_ids': [[1, 2], [4, 5], [8]], 'attention_mask': [[0, 1], [0, 0], [1]]}
    ```
    """
    # Fix: the docstring previously documented a parameter named `seq_length`; the actual parameter is `max_length`.
    if map_kwargs is None:
        map_kwargs = {}
    if isinstance(dataset, Dataset):
        # Fast truncation with pyarrow: slice list columns in C instead of per-row Python loops.

        def truncate(examples):
            truncated_columns = []
            for column in examples.columns:
                if pyarrow.types.is_list(column.type) or pyarrow.types.is_large_list(column.type):
                    column = pc.list_slice(column, 0, max_length)
                truncated_columns.append(column)
            return pa.Table.from_arrays(truncated_columns, names=examples.column_names)

        dataset = dataset.with_format("arrow")
        dataset = dataset.map(truncate, batched=True, **map_kwargs)
        dataset = dataset.with_format(None)
    else:
        # DatasetDict path: plain-Python truncation of list-valued columns.

        def truncate(examples):
            truncated_examples = {}
            for key, column in examples.items():
                if column and isinstance(column[0], list):
                    column = [val[:max_length] for val in column]
                truncated_examples[key] = column
            return truncated_examples

        dataset = dataset.map(
            truncate,
            batched=True,
            **map_kwargs,
        )
    return dataset
def maybe_convert_to_chatml(example: dict[str, list]) -> dict[str, list]:
    """
    Convert a conversational dataset with fields `from` and `value` to ChatML format.

    This function modifies conversational data to align with OpenAI's ChatML format:
    - Replaces the key `"from"` with `"role"` in message dictionaries.
    - Replaces the key `"value"` with `"content"` in message dictionaries.
    - Renames `"conversations"` to `"messages"` for consistency with ChatML.

    Args:
        example (`dict[str, list]`):
            A single data entry containing a list of messages.

    Returns:
        `dict[str, list]`: Example reformatted to ChatML style (mutated in place and returned).

    Example:
    ```python
    >>> from trl import maybe_convert_to_chatml

    >>> example = {
    ...     "conversations": [
    ...         {"from": "user", "value": "What color is the sky?"},
    ...         {"from": "assistant", "value": "It is blue."},
    ...     ]
    ... }
    >>> maybe_convert_to_chatml(example)
    {'messages': [{'role': 'user', 'content': 'What color is the sky?'},
                  {'role': 'assistant', 'content': 'It is blue.'}]}
    ```
    """
    # Keys that may hold a list of messages needing the from/value -> role/content rename.
    candidate_keys = ("prompt", "completion", "chosen", "rejected", "messages", "conversations")
    for key in candidate_keys:
        maybe_messages = example.get(key)
        if not isinstance(maybe_messages, list):
            continue
        for message in maybe_messages:
            if not isinstance(message, dict):
                continue
            if "from" in message:
                message["role"] = message.pop("from")
            if "value" in message:
                message["content"] = message.pop("value")

    # Rename "conversations" to "messages"
    if "conversations" in example:
        example["messages"] = example.pop("conversations")

    return example
trl/trl/data_utils.py/0
{ "file_path": "trl/trl/data_utils.py", "repo_id": "trl", "token_count": 13663 }
617
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass, field
from typing import Any, Optional

from transformers import TrainingArguments


@dataclass
class BCOConfig(TrainingArguments):
    r"""
    Configuration class for the [`BCOTrainer`].

    This class includes only the parameters that are specific to BCO training. For a full list of training arguments,
    please refer to the [`~transformers.TrainingArguments`] documentation. Note that default values in this class may
    differ from those in [`~transformers.TrainingArguments`].

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.

    Parameters:
        max_length (`int` or `None`, *optional*, defaults to `1024`):
            Maximum length of the sequences (prompt + completion) in the batch. This argument is required if you want
            to use the default data collator.
        max_prompt_length (`int` or `None`, *optional*, defaults to `512`):
            Maximum length of the prompt. This argument is required if you want to use the default data collator.
        max_completion_length (`int` or `None`, *optional*, defaults to `None`):
            Maximum length of the completion. This argument is required if you want to use the default data collator
            and your model is an encoder-decoder.
        beta (`float`, *optional*, defaults to `0.1`):
            Parameter controlling the deviation from the reference model. Higher β means less deviation from the
            reference model.
        label_pad_token_id (`int`, *optional*, defaults to `-100`):
            Label pad token id. This argument is required if you want to use the default data collator.
        padding_value (`int` or `None`, *optional*, defaults to `None`):
            Padding value to use. If `None`, the padding value of the tokenizer is used.
        truncation_mode (`str`, *optional*, defaults to `"keep_end"`):
            Truncation mode to use when the prompt is too long. Possible values are `"keep_end"` or `"keep_start"`.
            This argument is required if you want to use the default data collator.
        disable_dropout (`bool`, *optional*, defaults to `True`):
            Whether to disable dropout in the model and reference model.
        generate_during_eval (`bool`, *optional*, defaults to `False`):
            If `True`, generates and logs completions from both the model and the reference model to W&B or Comet
            during evaluation.
        is_encoder_decoder (`bool` or `None`, *optional*, defaults to `None`):
            When using the `model_init` argument (callable) to instantiate the model instead of the `model` argument,
            you need to specify if the model returned by the callable is an encoder-decoder model.
        precompute_ref_log_probs (`bool`, *optional*, defaults to `False`):
            Whether to precompute reference model log probabilities for training and evaluation datasets. This is
            useful when training without the reference model to reduce the total GPU memory needed.
        model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
            Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a
            string.
        ref_model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
            Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the reference model
            from a string.
        dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
            Number of processes to use for processing the dataset.
        prompt_sample_size (`int`, *optional*, defaults to `1024`):
            Number of prompts that are fed to density ratio classifier.
        min_density_ratio (`float`, *optional*, defaults to `0.5`):
            Minimum value of the density ratio. The estimated density ratio is clamped to this value.
        max_density_ratio (`float`, *optional*, defaults to `10.0`):
            Maximum value of the density ratio. The estimated density ratio is clamped to this value.
    """

    # Extend the parent's list of dict-valued fields so the kwargs dicts below can be
    # supplied on the command line (parsed by `HfArgumentParser`).
    _VALID_DICT_FIELDS = TrainingArguments._VALID_DICT_FIELDS + ["model_init_kwargs", "ref_model_init_kwargs"]

    # Parameters whose default values are overridden from TrainingArguments
    logging_steps: float = field(
        default=10,
        metadata={
            "help": "Log every X updates steps. Should be an integer or a float in range `[0,1)`. If smaller than 1, "
            "will be interpreted as ratio of total training steps."
        },
    )
    gradient_checkpointing: bool = field(
        default=True,
        metadata={
            "help": "If True, use gradient checkpointing to save memory at the expense of slower backward pass."
        },
    )
    # `None` (rather than the parent's plain bool) so that `__post_init__` can detect
    # "unset" and derive the default from `fp16`.
    bf16: Optional[bool] = field(
        default=None,
        metadata={
            "help": "Whether to use bf16 (mixed) precision instead of 32-bit. Requires Ampere or higher NVIDIA "
            "architecture or Intel XPU or using CPU (use_cpu) or Ascend NPU. If not set, it defaults to `True` if "
            "`fp16` is not set."
        },
    )

    # BCO-specific parameters
    max_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": "Maximum length of the sequences (prompt + completion) in the batch. "
            "This argument is required if you want to use the default data collator."
        },
    )
    max_prompt_length: Optional[int] = field(
        default=512,
        metadata={
            "help": "Maximum length of the prompt. "
            "This argument is required if you want to use the default data collator."
        },
    )
    max_completion_length: Optional[int] = field(
        default=None,
        metadata={
            "help": "Maximum length of the completion. This argument is required if you want to use the "
            "default data collator and your model is an encoder-decoder."
        },
    )
    beta: float = field(
        default=0.1,
        metadata={
            "help": "Parameter controlling the deviation from the reference model. "
            "Higher β means less deviation from the reference model."
        },
    )
    label_pad_token_id: int = field(
        default=-100,
        metadata={
            "help": "Label pad token id. This argument is required if you want to use the default data collator."
        },
    )
    padding_value: Optional[int] = field(
        default=None,
        metadata={"help": "Padding value to use. If `None`, the padding value of the tokenizer is used."},
    )
    truncation_mode: str = field(
        default="keep_end",
        metadata={
            "help": "Truncation mode to use when the prompt is too long. Possible values are "
            "`keep_end` or `keep_start`. This argument is required if you want to use the "
            "default data collator."
        },
    )
    disable_dropout: bool = field(
        default=True,
        metadata={"help": "Whether to disable dropout in the model and reference model."},
    )
    generate_during_eval: bool = field(
        default=False,
        metadata={
            "help": "If `True`, generates and logs completions from both the model and the reference model "
            "to W&B during evaluation."
        },
    )
    is_encoder_decoder: Optional[bool] = field(
        default=None,
        metadata={
            "help": "When using the `model_init` argument (callable) to instantiate the model instead of the "
            "`model` argument, you need to specify if the model returned by the callable is an "
            "encoder-decoder model."
        },
    )
    precompute_ref_log_probs: bool = field(
        default=False,
        metadata={
            "help": "Whether to precompute reference model log probabilities for training and evaluation datasets. "
            "This is useful when training without the reference model to reduce the total GPU memory "
            "needed."
        },
    )
    model_init_kwargs: Optional[dict[str, Any]] = field(
        default=None,
        metadata={
            "help": "Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the "
            "model from a string."
        },
    )
    ref_model_init_kwargs: Optional[dict[str, Any]] = field(
        default=None,
        metadata={
            "help": "Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the "
            "reference model from a string."
        },
    )
    dataset_num_proc: Optional[int] = field(
        default=None,
        metadata={"help": "Number of processes to use for processing the dataset."},
    )
    prompt_sample_size: int = field(
        default=1024,
        metadata={"help": "Number of prompts that are fed to density ratio classifier."},
    )
    min_density_ratio: float = field(
        default=0.5,
        metadata={"help": "Minimum value of the density ratio. The estimated density ratio is clamped to this value."},
    )
    max_density_ratio: float = field(
        default=10.0,
        metadata={"help": "Maximum value of the density ratio. The estimated density ratio is clamped to this value."},
    )

    def __post_init__(self):
        # If `bf16` was left unset, default it to the opposite of `fp16` (i.e. use bf16
        # unless fp16 was explicitly requested), then run the parent's validation.
        self.bf16 = not (self.fp16) if self.bf16 is None else self.bf16

        super().__post_init__()
trl/trl/trainer/bco_config.py/0
{ "file_path": "trl/trl/trainer/bco_config.py", "repo_id": "trl", "token_count": 3736 }
618
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass, field
from typing import Any, Optional

from transformers import TrainingArguments


@dataclass
class KTOConfig(TrainingArguments):
    r"""
    Configuration class for the [`KTOTrainer`].

    This class includes only the parameters that are specific to KTO training. For a full list of training arguments,
    please refer to the [`~transformers.TrainingArguments`] documentation. Note that default values in this class may
    differ from those in [`~transformers.TrainingArguments`].

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.

    Parameters:
        max_length (`int` or `None`, *optional*, defaults to `1024`):
            Maximum length of the sequences (prompt + completion) in the batch. This argument is required if you want
            to use the default data collator.
        max_prompt_length (`int` or `None`, *optional*, defaults to `512`):
            Maximum length of the prompt. This argument is required if you want to use the default data collator.
        max_completion_length (`int` or `None`, *optional*, defaults to `None`):
            Maximum length of the completion. This argument is required if you want to use the default data collator
            and your model is an encoder-decoder.
        beta (`float`, *optional*, defaults to `0.1`):
            Parameter controlling the deviation from the reference model. Higher β means less deviation from the
            reference model.
        loss_type (`str`, *optional*, defaults to `"kto"`):
            Type of loss to use. Possible values are:

                - `"kto"`: KTO loss from the [KTO](https://huggingface.co/papers/2402.01306) paper.
                - `"apo_zero_unpaired"`: Unpaired variant of APO-zero loss from the
                  [APO](https://huggingface.co/papers/2408.06266) paper.

        desirable_weight (`float`, *optional*, defaults to `1.0`):
            Desirable losses are weighed by this factor to counter unequal number of desirable and undesirable pairs.
        undesirable_weight (`float`, *optional*, defaults to `1.0`):
            Undesirable losses are weighed by this factor to counter unequal number of desirable and undesirable
            pairs.
        label_pad_token_id (`int`, *optional*, defaults to `-100`):
            Label pad token id. This argument is required if you want to use the default data collator.
        padding_value (`int` or `None`, *optional*, defaults to `None`):
            Padding value to use. If `None`, the padding value of the tokenizer is used.
        truncation_mode (`str`, *optional*, defaults to `"keep_end"`):
            Truncation mode to use when the prompt is too long. Possible values are `"keep_end"` or `"keep_start"`.
            This argument is required if you want to use the default data collator.
        generate_during_eval (`bool`, *optional*, defaults to `False`):
            If `True`, generates and logs completions from both the model and the reference model to W&B or Comet
            during evaluation.
        is_encoder_decoder (`bool` or `None`, *optional*, defaults to `None`):
            When using the `model_init` argument (callable) to instantiate the model instead of the `model` argument,
            you need to specify if the model returned by the callable is an encoder-decoder model.
        precompute_ref_log_probs (`bool`, *optional*, defaults to `False`):
            Whether to precompute reference model log probabilities for training and evaluation datasets. This is
            useful when training without the reference model to reduce the total GPU memory needed.
        model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
            Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a
            string.
        ref_model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
            Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the reference model
            from a string.
        dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
            Number of processes to use for processing the dataset.
        disable_dropout (`bool`, *optional*, defaults to `True`):
            Whether to disable dropout in the model and reference model.
        use_liger_loss (`bool`, *optional*, defaults to `False`):
            Whether to use Liger loss. It requires liger-kernel to be installed.
        base_model_attribute_name (`str`, *optional*, defaults to `"model"`):
            Name of the attribute in the model that contains the base model. This is used to get the base model from
            the model when the model does not have a `get_decoder` method in the case when `use_liger_loss` is `True`.
    """

    # Extend the parent's list of dict-valued fields so the kwargs dicts below can be
    # supplied on the command line (parsed by `HfArgumentParser`).
    _VALID_DICT_FIELDS = TrainingArguments._VALID_DICT_FIELDS + ["model_init_kwargs", "ref_model_init_kwargs"]

    # Parameters whose default values are overridden from TrainingArguments
    learning_rate: float = field(
        default=1e-6,
        metadata={"help": "The initial learning rate for AdamW."},
    )
    logging_steps: float = field(
        default=10,
        metadata={
            "help": "Log every X updates steps. Should be an integer or a float in range `[0,1)`. If smaller than 1, "
            "will be interpreted as ratio of total training steps."
        },
    )
    gradient_checkpointing: bool = field(
        default=True,
        metadata={
            "help": "If True, use gradient checkpointing to save memory at the expense of slower backward pass."
        },
    )
    # `None` (rather than the parent's plain bool) so that `__post_init__` can detect
    # "unset" and derive the default from `fp16`.
    bf16: Optional[bool] = field(
        default=None,
        metadata={
            "help": "Whether to use bf16 (mixed) precision instead of 32-bit. Requires Ampere or higher NVIDIA "
            "architecture or Intel XPU or using CPU (use_cpu) or Ascend NPU. If not set, it defaults to `True` if "
            "`fp16` is not set."
        },
    )

    # KTO-specific parameters
    max_length: Optional[int] = field(
        default=1024,
        metadata={"help": "Maximum length of the sequences (prompt + completion) in the batch."},
    )
    max_prompt_length: Optional[int] = field(
        default=512,
        metadata={
            # Fixed help text: this argument is not tied to encoder-decoder models (that
            # clause belongs to `max_completion_length`; see the class docstring).
            "help": "Maximum length of the prompt. This argument is required if you want to use the default data "
            "collator."
        },
    )
    max_completion_length: Optional[int] = field(
        default=None,
        metadata={
            "help": "Maximum length of the completion. This argument is required if you want to use the default data "
            "collator and your model is an encoder-decoder."
        },
    )
    beta: float = field(
        default=0.1,
        metadata={
            "help": "Parameter controlling the deviation from the reference model. Higher β means less deviation from "
            "the reference model."
        },
    )
    loss_type: str = field(
        default="kto",
        metadata={
            "help": "Type of loss to use.",
            "choices": ["kto", "apo_zero_unpaired"],
        },
    )
    desirable_weight: float = field(
        default=1.0,
        metadata={
            "help": "Desirable losses are weighed by this factor to counter unequal number of desirable and "
            "undesirable pairs.",
        },
    )
    undesirable_weight: float = field(
        default=1.0,
        metadata={
            "help": "Undesirable losses are weighed by this factor to counter unequal number of desirable and "
            "undesirable pairs.",
        },
    )
    label_pad_token_id: int = field(
        default=-100,
        metadata={
            "help": "Label pad token id. This argument is required if you want to use the default data collator."
        },
    )
    padding_value: Optional[int] = field(
        default=None,
        metadata={"help": "Padding value to use. If `None`, the padding value of the tokenizer is used."},
    )
    truncation_mode: str = field(
        default="keep_end",
        metadata={
            "help": "Truncation mode to use when the prompt is too long.",
            "choices": ["keep_end", "keep_start"],
        },
    )
    generate_during_eval: bool = field(
        default=False,
        metadata={
            "help": "If `True`, generates and logs completions from both the model and the reference model to W&B "
            "during evaluation."
        },
    )
    is_encoder_decoder: Optional[bool] = field(
        default=None,
        metadata={
            "help": "When using the `model_init` argument (callable) to instantiate the model instead of the `model` "
            "argument, you need to specify if the model returned by the callable is an encoder-decoder model."
        },
    )
    disable_dropout: bool = field(
        default=True,
        metadata={"help": "Whether to disable dropout in the model."},
    )
    precompute_ref_log_probs: bool = field(
        default=False,
        metadata={
            "help": "Whether to precompute reference model log probabilities for training and evaluation datasets. "
            "This is useful when training without the reference model to reduce the total GPU memory needed."
        },
    )
    model_init_kwargs: Optional[dict[str, Any]] = field(
        default=None,
        metadata={
            "help": "Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model "
            "from a string."
        },
    )
    ref_model_init_kwargs: Optional[dict[str, Any]] = field(
        default=None,
        metadata={
            "help": "Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the "
            "reference model from a string."
        },
    )
    dataset_num_proc: Optional[int] = field(
        default=None,
        metadata={"help": "Number of processes to use for processing the dataset."},
    )
    use_liger_loss: bool = field(
        default=False,
        metadata={"help": "Whether to use Liger loss. It requires liger-kernel to be installed."},
    )
    base_model_attribute_name: str = field(
        default="model",
        metadata={
            "help": "Name of the attribute in the model that contains the base model. This is used to get the base "
            "model from the model when the model does not have a `get_decoder` method in the case when "
            "`use_liger_loss` is `True`."
        },
    )

    def __post_init__(self):
        # If `bf16` was left unset, default it to the opposite of `fp16` (i.e. use bf16
        # unless fp16 was explicitly requested), then run the parent's validation.
        self.bf16 = not (self.fp16) if self.bf16 is None else self.bf16

        super().__post_init__()
trl/trl/trainer/kto_config.py/0
{ "file_path": "trl/trl/trainer/kto_config.py", "repo_id": "trl", "token_count": 4307 }
619
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import math import os import textwrap import time from collections import defaultdict from pathlib import Path from typing import Callable, Optional, Union import numpy as np import pandas as pd import torch import torch.nn as nn from accelerate import Accelerator from accelerate.utils import broadcast, gather_object from datasets import Dataset from torch.utils.data import DataLoader from transformers import ( BaseImageProcessor, DataCollatorWithPadding, FeatureExtractionMixin, GenerationConfig, PreTrainedTokenizerBase, ProcessorMixin, Trainer, TrainerCallback, TrainerControl, is_wandb_available, ) from transformers.integrations import get_reporting_integration_callbacks from transformers.trainer import DEFAULT_CALLBACKS, DEFAULT_PROGRESS_CALLBACK from transformers.trainer_callback import CallbackHandler, ExportableState, PrinterCallback from transformers.utils import is_rich_available from ..models.utils import unwrap_model_for_generation from ..trainer.utils import ( OnlineTrainerState, batch_generation, disable_dropout_in_model, exact_div, first_true_indices, forward, get_reward, prepare_deepspeed, print_rich_table, selective_log_softmax, truncate_response, ) from .rloo_config import RLOOConfig from .utils import empty_cache, generate_model_card, get_comet_experiment_url, log_table_to_comet_experiment if is_wandb_available(): import wandb INVALID_LOGPROB = 
1.0 class RLOOTrainer(Trainer): _tag_names = ["trl", "rloo"] def __init__( self, config: RLOOConfig, processing_class: Optional[ Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin] ], policy: nn.Module, ref_policy: nn.Module, reward_model: Union[nn.Module, Callable[[list[str]], list[float]]], train_dataset: Dataset, data_collator: Optional[DataCollatorWithPadding] = None, eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None, # less commonly used optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), callbacks: Optional[list[TrainerCallback]] = None, ) -> None: if ref_policy is policy: raise ValueError( "`policy` and `ref_policy` cannot be the same object. If you want `ref_policy` to be the " "same as `policy`, you must mass a copy of it, or `None` if you use peft." ) self.args = config args = config self.processing_class = processing_class self.policy = policy # Define the collator if not provided if data_collator is None: data_collator = DataCollatorWithPadding(self.processing_class) self.policy.generation_config.eos_token_id = ( None # disable `pad_token_id` and `eos_token_id` because we just want to ) self.policy.generation_config.pad_token_id = None # generate tokens without truncation / padding self.ref_policy = ref_policy self.reward_model = reward_model self.train_dataset = train_dataset self.train_dataset_len = len(train_dataset) self.data_collator = data_collator self.eval_dataset = eval_dataset self.optimizer, self.lr_scheduler = optimizers self.optimizer_cls_and_kwargs = None # needed for transformers >= 4.47 ######### # calculate various batch sizes ######### if args.total_episodes is None: # allow the users to define episodes in terms of epochs. 
args.total_episodes = int(args.num_train_epochs * self.train_dataset_len) accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps) self.accelerator = accelerator args.world_size = accelerator.num_processes args.local_batch_size = ( args.per_device_train_batch_size * args.gradient_accumulation_steps * args.num_mini_batches ) args.micro_batch_size = int(args.per_device_train_batch_size * args.world_size) args.batch_size = int(args.local_batch_size * args.world_size) args.mini_batch_size = exact_div( args.batch_size, args.num_mini_batches, "`batch_size` must be a multiple of `num_mini_batches`" ) args.local_mini_batch_size = exact_div( args.local_batch_size, args.num_mini_batches, "`local_batch_size` must be a multiple of `num_mini_batches`" ) args.num_total_batches = math.ceil( args.total_episodes / args.batch_size ) # we may train for more than `total_episodes` time_tensor = torch.tensor(int(time.time()), device=accelerator.device) time_int = broadcast(time_tensor, 0).item() # avoid different timestamps across processes args.run_name = f"{args.exp_name}__{args.seed}__{time_int}" self.local_seed = args.seed + accelerator.process_index * 100003 # Prime if args.num_sample_generations > 0: self.sample_generations_freq = max(1, args.num_total_batches // args.num_sample_generations) self.local_dataloader_batch_size = exact_div( args.local_batch_size, args.rloo_k, "`local_batch_size` must be a multiple of rloo_k" ) # RLOO logic: needed because RLOO repeats the same prompt args.rloo_k times ######### # setup model, optimizer, and others ######### for module in [policy, ref_policy, reward_model]: if isinstance(module, nn.Module): disable_dropout_in_model(module) if args.stop_token and args.stop_token == "eos": args.stop_token_id = self.processing_class.eos_token_id self.model = policy self.create_optimizer_and_scheduler( num_training_steps=args.num_total_batches ) # note that we are calling `self.lr_scheduler.step()` manually only at the batch 
level ######### ### trainer specifics ######### default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to) self.callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks self.callback_handler = CallbackHandler( self.callbacks, self.model, self.processing_class, self.optimizer, self.lr_scheduler ) self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK) self.control = TrainerControl() self.state = OnlineTrainerState( is_local_process_zero=self.is_local_process_zero(), is_world_process_zero=self.is_world_process_zero(), stateful_callbacks=[ cb for cb in self.callback_handler.callbacks + [self.control] if isinstance(cb, ExportableState) ], ) self.current_flos = 0 self.hp_search_backend = None self.is_deepspeed_enabled = getattr(self.accelerator.state, "deepspeed_plugin", None) is not None self.is_fsdp_enabled = getattr(self.accelerator.state, "fsdp_plugin", None) is not None # Create distant repo and output directory if needed self.hub_model_id = None if self.args.push_to_hub: self.init_hf_repo() if self.args.should_save: os.makedirs(self.args.output_dir, exist_ok=True) self.backup_model = None # Add tags for models that have been loaded with the correct transformers version if hasattr(self.model, "add_model_tags"): self.model.add_model_tags(self._tag_names) ######### ### setup dataloader ######### self.dataloader = DataLoader( self.train_dataset, batch_size=self.local_dataloader_batch_size, shuffle=True, collate_fn=self.data_collator, drop_last=True, # needed; otherwise the last batch will be of ragged shape ) # sync random states for DataLoader(shuffle=True) before `accelerator.prepare` # see https://gist.github.com/vwxyzjn/2581bff1e48e185e0b85b6dfe1def79c torch.manual_seed(args.seed) self.model, self.optimizer, self.dataloader = accelerator.prepare(self.model, self.optimizer, self.dataloader) torch.manual_seed(self.local_seed) # reset the local seed again 
self.eval_dataloader = DataLoader( self.eval_dataset, batch_size=args.per_device_eval_batch_size, collate_fn=self.data_collator, drop_last=True, ) # no need to shuffle eval dataset self.eval_dataloader = accelerator.prepare(self.eval_dataloader) if self.is_deepspeed_enabled: if isinstance(self.reward_model, nn.Module): self.reward_model = prepare_deepspeed( self.reward_model, args.per_device_train_batch_size, args.fp16, args.bf16 ) self.ref_policy = prepare_deepspeed( self.ref_policy, args.per_device_train_batch_size, args.fp16, args.bf16 ) self.deepspeed = self.model else: self.ref_policy = self.ref_policy.to(self.accelerator.device) if isinstance(self.reward_model, nn.Module): self.reward_model = self.reward_model.to(self.accelerator.device) def get_train_dataloader(self) -> DataLoader: return self.dataloader def get_eval_dataloader(self) -> DataLoader: return self.eval_dataloader def train(self): args = self.args accelerator = self.accelerator optimizer = self.optimizer model = self.model self.model_wrapped = self.model ref_policy = self.ref_policy reward_model = self.reward_model processing_class = self.processing_class dataloader = self.dataloader device = accelerator.device def repeat_generator(): while True: yield from dataloader iter_dataloader = iter(repeat_generator()) generation_config = GenerationConfig( max_new_tokens=args.response_length, temperature=(args.temperature + 1e-7), top_k=0.0, top_p=1.0, do_sample=True, ) accelerator.print("===training policy===") start_time = time.time() stats_shape = (args.num_ppo_epochs, args.num_mini_batches, args.gradient_accumulation_steps) approxkl_stats = torch.zeros(stats_shape, device=device) pg_clipfrac_stats = torch.zeros(stats_shape, device=device) pg_loss_stats = torch.zeros(stats_shape, device=device) vf_clipfrac_stats = torch.zeros(stats_shape, device=device) entropy_stats = torch.zeros(stats_shape, device=device) ratio_stats = torch.zeros(stats_shape, device=device) model.train() # trainer state 
initialization self.state.global_step = 0 self.state.episode = 0 self.state.max_steps = (args.num_total_batches * args.num_mini_batches) // 2 self.state.num_train_epochs = args.total_episodes / self.train_dataset_len # Compute absolute values for logging, eval, and save if given as ratio if args.logging_steps is not None: if args.logging_steps < 1: self.state.logging_steps = math.ceil(self.state.max_steps * args.logging_steps) else: self.state.logging_steps = args.logging_steps if args.eval_steps is not None: if args.eval_steps < 1: self.state.eval_steps = math.ceil(self.state.max_steps * args.eval_steps) else: self.state.eval_steps = args.eval_steps if args.save_steps is not None: if args.save_steps < 1: self.state.save_steps = math.ceil(self.state.max_steps * args.save_steps) else: self.state.save_steps = args.save_steps self.control = self.callback_handler.on_train_begin(args, self.state, self.control) for update in range(1, args.num_total_batches + 1): self.state.episode += 1 * args.batch_size data = next(iter_dataloader) with torch.no_grad(): queries = data["input_ids"].to(device) queries = queries.repeat(args.rloo_k, 1) context_length = queries.shape[1] responses = [] postprocessed_responses = [] logprobs = [] ref_logprobs = [] scores = [] sequence_lengths = [] # Generate responses and compute logprobs with unwrap_model_for_generation( self.model, self.accelerator, gather_deepspeed3_params=self.args.ds3_gather_for_generation ) as unwrapped_model: query_responses, logitss = batch_generation( unwrapped_model, queries, args.local_rollout_forward_batch_size, processing_class.pad_token_id, generation_config, ) # Process responses in batches for i in range(0, queries.shape[0], args.local_rollout_forward_batch_size): query = queries[i : i + args.local_rollout_forward_batch_size] query_response = query_responses[i : i + args.local_rollout_forward_batch_size] response = query_response[:, context_length:] logits = logitss[i : i + args.local_rollout_forward_batch_size] 
logprob = selective_log_softmax(logits, response) del logits empty_cache() ref_output = forward(ref_policy, query_response, processing_class.pad_token_id) ref_logits = ref_output.logits[:, context_length - 1 : -1] ref_logits /= args.temperature + 1e-7 ref_logprob = selective_log_softmax(ref_logits, response) del ref_output, ref_logits empty_cache() # Response Processing 1. truncate response after the first occurrence of `stop_token_id` postprocessed_response = response if args.stop_token_id is not None: # handle the edge case when stop_token_id exists but is 0 postprocessed_response = truncate_response( args.stop_token_id, processing_class.pad_token_id, response ) # Response Processing 2. run reward model on the truncated responses postprocessed_query_response = torch.cat((query, postprocessed_response), 1) sequence_length = first_true_indices(postprocessed_response == processing_class.pad_token_id) - 1 if isinstance(reward_model, nn.Module): _, score, _ = get_reward( reward_model, postprocessed_query_response, processing_class.pad_token_id, context_length ) else: score = torch.tensor( reward_model( processing_class.batch_decode(postprocessed_query_response, skip_special_tokens=True) ), dtype=torch.float, ).to(device) # Store batch results responses.append(response) postprocessed_responses.append(postprocessed_response) logprobs.append(logprob) ref_logprobs.append(ref_logprob) sequence_lengths.append(sequence_length) scores.append(score) # Concatenate all batched results responses = torch.cat(responses, 0) postprocessed_responses = torch.cat(postprocessed_responses, 0) logprobs = torch.cat(logprobs, 0) ref_logprobs = torch.cat(ref_logprobs, 0) sequence_lengths = torch.cat(sequence_lengths, 0) scores = torch.cat(scores, 0) del (logprob, ref_logprob, score) empty_cache() gc.collect() # Response Processing 3. filter response. 
Ensure that the sample contains stop_token_id # responses not passing that filter will receive a low (fixed) score # only query humans on responses that pass that filter contain_eos_token = torch.any(postprocessed_responses == processing_class.eos_token_id, dim=-1) if args.missing_eos_penalty is not None: scores[~contain_eos_token] -= self.args.missing_eos_penalty # accelerator.print(f"{scores=}, {(contain_eos_token.sum() / len(contain_eos_token))=}") # be very careful with `padding_mask_p1`; see https://excalidraw.com/#json=LWnzG4w2k5DjF_EOL_xPt,e2w3a-hFJ_gX5vOfeyXGTw response_idxs = torch.arange(responses.shape[1], device=responses.device).repeat(responses.shape[0], 1) padding_mask = response_idxs > sequence_lengths.unsqueeze(1) logprobs = torch.masked_fill(logprobs, padding_mask, INVALID_LOGPROB) ref_logprobs = torch.masked_fill(ref_logprobs, padding_mask, INVALID_LOGPROB) # 4. compute rewards # Compute KL divergence kl = logprobs - ref_logprobs # Normalize rewards if args.normalize_reward: scores = (scores - scores.mean()) / (scores.std() + 1e-8) scores = torch.clamp(scores, -args.reward_clip_range, args.reward_clip_range) # Compute total reward with KL penalty if args.token_level_kl: # Token-level KL penalty: apply KL penalty per token kl_reward = -args.kl_coef * kl # Get the index of the last non-padded token for each sequence eos_indices = padding_mask.size(1) - 1 - padding_mask.long().fliplr().argmax(dim=1, keepdim=True) last_reward = torch.zeros_like(kl) # Ensure scores has correct shape and type scores_shaped = scores.reshape(-1, 1).to(kl.dtype) last_reward.scatter_(dim=1, index=eos_indices, src=scores_shaped) # Combine KL reward and last reward non_score_reward = kl_reward.sum(1) # Keep this for logging reward = last_reward + kl_reward rlhf_reward = reward.sum(1) # Sum across sequence length else: # Sequence-level KL penalty: sum KL across tokens first sequence_kl = kl.sum(1) non_score_reward = -args.kl_coef * sequence_kl rlhf_reward = non_score_reward + 
scores # vectorized RLOO advantages implementation rlhf_reward = rlhf_reward.reshape(args.rloo_k, -1) baseline = (rlhf_reward.sum(0) - rlhf_reward) / (args.rloo_k - 1) advantages = rlhf_reward - baseline advantages = advantages.flatten() # Normalize advantages if args.normalize_advantage: advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) empty_cache() # Do multiple epochs of PPO training, with a fresh random shuffle in each epoch for ppo_epoch_idx in range(args.num_ppo_epochs): b_inds = np.random.permutation(args.local_batch_size) minibatch_idx = 0 for mini_batch_start in range(0, args.local_batch_size, args.local_mini_batch_size): mini_batch_end = mini_batch_start + args.local_mini_batch_size mini_batch_inds = b_inds[mini_batch_start:mini_batch_end] gradient_accumulation_idx = 0 for micro_batch_start in range(0, args.local_mini_batch_size, args.per_device_train_batch_size): with accelerator.accumulate(model): micro_batch_end = micro_batch_start + args.per_device_train_batch_size micro_batch_inds = mini_batch_inds[micro_batch_start:micro_batch_end] # Get batch data mb_advantage = advantages[micro_batch_inds] mb_responses = responses[micro_batch_inds] mb_query_responses = query_responses[micro_batch_inds] mb_logprobs = logprobs[micro_batch_inds] # Forward pass output = forward(model, mb_query_responses, processing_class.pad_token_id) logits = output.logits[:, context_length - 1 : -1] logits /= args.temperature + 1e-7 # Compute new logprobs new_logprobs = selective_log_softmax(logits, mb_responses) new_logprobs = torch.masked_fill( new_logprobs, padding_mask[micro_batch_inds], INVALID_LOGPROB ) # Compute probability ratios new_ratio = (new_logprobs - mb_logprobs).exp() new_logprobs = new_logprobs.sum(1) mb_logprobs = mb_logprobs.sum(1) logprobs_diff = new_logprobs - mb_logprobs ratio = torch.exp(logprobs_diff) # PPO clipped loss pg_losses = -mb_advantage * ratio pg_losses2 = -mb_advantage * torch.clamp(ratio, 1.0 - args.cliprange, 1.0 + 
args.cliprange) pg_loss_max = torch.max(pg_losses, pg_losses2) pg_loss = pg_loss_max.mean() # Final loss loss = pg_loss # Optimization step accelerator.backward(loss) optimizer.step() optimizer.zero_grad() with torch.no_grad(): pg_clipfrac = (pg_losses2 > pg_losses).float().mean() prob_dist = torch.nn.functional.softmax(logits, dim=-1) entropy = torch.logsumexp(logits, dim=-1) - torch.sum(prob_dist * logits, dim=-1) approxkl = 0.5 * (logprobs_diff**2).mean() approxkl_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = approxkl pg_clipfrac_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = ( pg_clipfrac ) pg_loss_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = pg_loss entropy_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = entropy.mean() ratio_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = new_ratio.mean() gradient_accumulation_idx += 1 minibatch_idx += 1 # del everything and empty cache # fmt: off del ( output, logits, new_logprobs, logprobs_diff, ratio, pg_losses, pg_losses2, pg_loss, loss, pg_clipfrac, prob_dist, entropy, approxkl, mb_advantage, mb_responses, mb_query_responses, mb_logprobs, ) # fmt: on empty_cache() # Compute metrics with torch.no_grad(): mean_kl = kl.sum(1).mean() mean_entropy = (-logprobs).sum(1).mean() mean_non_score_reward = non_score_reward.mean() eps = int(self.state.episode / (time.time() - start_time)) metrics = {} metrics["eps"] = eps metrics["objective/kl"] = self.accelerator.gather_for_metrics(mean_kl).mean().item() metrics["objective/entropy"] = self.accelerator.gather_for_metrics(mean_entropy).mean().item() metrics["objective/non_score_reward"] = ( self.accelerator.gather_for_metrics(mean_non_score_reward).mean().item() ) metrics["objective/rlhf_reward"] = self.accelerator.gather_for_metrics(rlhf_reward).mean().item() metrics["objective/scores"] = self.accelerator.gather_for_metrics(scores.mean()).mean().item() metrics["policy/approxkl_avg"] = 
self.accelerator.gather_for_metrics(approxkl_stats).mean().item() metrics["policy/clipfrac_avg"] = self.accelerator.gather_for_metrics(pg_clipfrac_stats).mean().item() metrics["loss/policy_avg"] = self.accelerator.gather_for_metrics(pg_loss_stats).mean().item() metrics["val/clipfrac_avg"] = self.accelerator.gather_for_metrics(vf_clipfrac_stats).mean().item() metrics["policy/entropy_avg"] = self.accelerator.gather_for_metrics(entropy_stats).mean().item() metrics["val/ratio"] = self.accelerator.gather_for_metrics(ratio_stats).mean().item() metrics["val/ratio_var"] = self.accelerator.gather_for_metrics(ratio_stats).var().item() metrics["val/num_eos_tokens"] = (responses == processing_class.eos_token_id).sum().item() metrics["lr"] = self.lr_scheduler.get_last_lr()[0] metrics["episode"] = self.state.episode self.state.epoch = self.state.episode / (args.rloo_k * self.train_dataset_len) # used by self.log self.log(metrics) del kl, mean_kl, mean_entropy, scores self.lr_scheduler.step() self.state.global_step += 1 self.control = self.callback_handler.on_step_end(args, self.state, self.control) if self.control.should_save: self._save_checkpoint(model, trial=None) self.control = self.callback_handler.on_save(self.args, self.state, self.control) empty_cache() gc.collect() if args.num_sample_generations > 0 and (update - 1) % self.sample_generations_freq == 0: self.generate_completions(sampling=True) # HF trainer specifics self.control = self.callback_handler.on_train_end(args, self.state, self.control) if self.control.should_save: self._save_checkpoint(model, trial=None, metrics=None) self.control = self.callback_handler.on_save(self.args, self.state, self.control) def generate_completions(self, sampling: bool = False): args = self.args processing_class = self.processing_class generation_config = GenerationConfig( max_new_tokens=self.args.response_length, temperature=(0.01 + 1e-7), top_k=0.0, top_p=1.0, do_sample=True, ) table = defaultdict(list) with 
unwrap_model_for_generation( self.model, self.accelerator, gather_deepspeed3_params=self.args.ds3_gather_for_generation ) as unwrapped_model: for batch in self.eval_dataloader: query = batch["input_ids"] with torch.no_grad(): context_length = query.shape[1] query_response, _ = batch_generation( unwrapped_model, query, query.shape[0], processing_class.pad_token_id, generation_config, ) response = query_response[:, context_length:] postprocessed_response = response if args.stop_token_id is not None: # handle the edge case when stop_token_id exists but is 0 postprocessed_response = truncate_response( args.stop_token_id, processing_class.pad_token_id, response ) table["query"].extend( gather_object(processing_class.batch_decode(query, skip_special_tokens=True)) ) table["model response"].extend( gather_object(processing_class.batch_decode(postprocessed_response)) ) postprocessed_query_response = torch.cat((query, postprocessed_response), 1) if isinstance(self.reward_model, nn.Module): _, score, _ = get_reward( self.reward_model, postprocessed_query_response, processing_class.pad_token_id, context_length, ) else: score = torch.tensor( self.reward_model( processing_class.batch_decode(postprocessed_query_response, skip_special_tokens=True) ), dtype=torch.float, ).to(postprocessed_query_response.device) table["score"].extend(self.accelerator.gather_for_metrics(score).float().cpu().numpy()) if sampling: break df = pd.DataFrame(table) if self.accelerator.is_main_process: if is_rich_available(): print_rich_table(df.iloc[0 : 0 + 5]) if "wandb" in args.report_to: import wandb if wandb.run is not None: wandb.log({"completions": wandb.Table(dataframe=df)}) if "comet_ml" in args.report_to: log_table_to_comet_experiment( name="completions.csv", table=df, ) # Ensure the model card is saved along with the checkpoint def _save_checkpoint(self, model, trial): if self.args.hub_model_id is None: model_name = Path(self.args.output_dir).name else: model_name = 
self.args.hub_model_id.split("/")[-1]
        self.create_model_card(model_name=model_name)
        super()._save_checkpoint(model, trial)

    def create_model_card(
        self,
        model_name: Optional[str] = None,
        dataset_name: Optional[str] = None,
        tags: Union[str, list[str], None] = None,
    ):
        """
        Creates a draft of a model card using the information available to the `Trainer`.

        Args:
            model_name (`str` or `None`, *optional*, defaults to `None`):
                Name of the model.
            dataset_name (`str` or `None`, *optional*, defaults to `None`):
                Name of the dataset used for training.
            tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
                Tags to be associated with the model card.
        """
        # Only the main process writes the model card.
        if not self.is_world_process_zero():
            return

        # `_name_or_path` names the base model only when it is a hub repo id, not a local path.
        if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
            base_model = self.model.config._name_or_path
        else:
            base_model = None

        # normalize `tags` to a mutable set
        if tags is None:
            tags = set()
        elif isinstance(tags, str):
            tags = {tags}
        else:
            tags = set(tags)

        if hasattr(self.model.config, "unsloth_version"):
            tags.add("unsloth")

        # Always include the trainer's own tags alongside any caller-supplied ones.
        tags.update(self._tag_names)

        # docstyle-ignore
        citation = textwrap.dedent("""\
        @inproceedings{ahmadian2024back,
            title = {{Back to Basics: Revisiting REINFORCE-Style Optimization for Learning from Human Feedback in LLMs}},
            author = {Arash Ahmadian and Chris Cremer and Matthias Gall{\'{e}} and Marzieh Fadaee and Julia Kreutzer and Olivier Pietquin and Ahmet {\"{U}}st{\"{u}}n and Sara Hooker},
            year = 2024,
            booktitle = {Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), {ACL} 2024, Bangkok, Thailand, August 11-16, 2024},
            publisher = {Association for Computational Linguistics},
            pages = {12248--12267},
            editor = {Lun{-}Wei Ku and Andre Martins and Vivek Srikumar},
        }""")

        # Only attach a W&B URL when wandb is installed and a run is active.
        model_card = generate_model_card(
            base_model=base_model,
            model_name=model_name,
            hub_model_id=self.hub_model_id,
            dataset_name=dataset_name,
            tags=tags,
            wandb_url=wandb.run.url if is_wandb_available() and wandb.run is not None else None,
            comet_url=get_comet_experiment_url(),
            trainer_name="RLOO",
            trainer_citation=citation,
            paper_title="Back to Basics: Revisiting REINFORCE-Style Optimization for Learning from Human Feedback in LLMs",
            paper_id="2402.14740",
        )

        model_card.save(os.path.join(self.args.output_dir, "README.md"))
trl/trl/trainer/rloo_trainer.py/0
{ "file_path": "trl/trl/trainer/rloo_trainer.py", "repo_id": "trl", "token_count": 17101 }
620
# Introduction

<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit3/pokemon_thumbnail.png" alt="Bonus Unit 3 AI in Games"/>

🎶I want to be the very best ... 🎶

Welcome to this **bonus unit**, where you'll explore the exciting intersection of **AI Agents and games**! 🎮🤖

Imagine a game where non-player characters (NPCs) don’t just follow scripted lines, but instead hold dynamic conversations, adapt to your strategies, and evolve as the story unfolds. This is the power of combining **LLMs and agentic behavior in games**: it opens the door to **emergent storytelling and gameplay like never before**.

In this bonus unit, you’ll:

- Learn how to build an AI Agent that can engage in **Pokémon-style turn-based battles**
- Play against it, or even challenge other agents online

We've already seen [some](https://www.anthropic.com/research/visible-extended-thinking) [examples](https://www.twitch.tv/gemini_plays_pokemon) from the AI community of playing Pokémon using LLMs, and in this unit you'll learn how you can replicate that using your own Agent with the ideas that you've learnt through the course.

<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit3/claude-plays-pokemon.png" alt="Claude plays Pokémon"/>

## Want to go further?

- 🎓 **Master LLMs in Games**: Dive deeper into game development with our full course [Machine Learning for Games Course](https://hf.co/learn/ml-games-course).

- 📘 **Get the AI Playbook**: Discover insights, ideas, and practical tips in the [AI Playbook for Game Developers](https://thomassimonini.substack.com/), where the future of intelligent game design is explored.

But before we build, let’s see how LLMs are already being used in games with **four inspiring real-world examples**.
agents-course/units/en/bonus-unit3/introduction.mdx/0
{ "file_path": "agents-course/units/en/bonus-unit3/introduction.mdx", "repo_id": "agents-course", "token_count": 516 }
0
### Q1: What is an Agent? Which of the following best describes an AI Agent? <Question choices={[ { text: "An AI model that can reason, plan, and use tools to interact with its environment to achieve a specific goal.", explain: "This definition captures the essential characteristics of an Agent.", correct: true }, { text: "A system that solely processes static text, without any inherent mechanism to interact dynamically with its surroundings or execute meaningful actions.", explain: "An Agent must be able to take an action and interact with its environment.", }, { text: "A conversational agent restricted to answering queries, lacking the ability to perform any actions or interact with external systems.", explain: "A chatbot like this lacks the ability to take actions, making it different from an Agent.", }, { text: "An online repository of information that offers static content without the capability to execute tasks or interact actively with users.", explain: "An Agent actively interacts with its environment rather than just providing static information.", } ]} /> --- ### Q2: What is the Role of Planning in an Agent? Why does an Agent need to plan before taking an action? 
<Question choices={[ { text: "To primarily store or recall past interactions, rather than mapping out a sequence of future actions.", explain: "Planning is about determining future actions, not storing past interactions.", }, { text: "To decide on the sequence of actions and select appropriate tools needed to fulfill the user’s request.", explain: "Planning helps the Agent determine the best steps and tools to complete a task.", correct: true }, { text: "To execute a sequence of arbitrary and uncoordinated actions that lack any defined strategy or intentional objective.", explain: "Planning ensures the Agent's actions are intentional and not random.", }, { text: "To merely convert or translate text, bypassing any process of formulating a deliberate sequence of actions or employing strategic reasoning.", explain: "Planning is about structuring actions, not just converting text.", } ]} /> --- ### Q3: How Do Tools Enhance an Agent's Capabilities? Why are tools essential for an Agent? <Question choices={[ { text: "Tools serve no real purpose and do not contribute to the Agent’s ability to perform actions beyond basic text generation.", explain: "Tools expand an Agent's capabilities by allowing it to perform actions beyond text generation.", }, { text: "Tools are solely designed for memory storage, lacking any capacity to facilitate the execution of tasks or enhance interactive performance.", explain: "Tools are primarily for performing actions, not just for storing data.", }, { text: "Tools severely restrict the Agent exclusively to generating text, thereby preventing it from engaging in a broader range of interactive actions.", explain: "On the contrary, tools allow Agents to go beyond text-based responses.", }, { text: "Tools provide the Agent with the ability to execute actions a text-generation model cannot perform natively, such as making coffee or generating images.", explain: "Tools enable Agents to interact with the real world and complete tasks.", correct: 
true } ]} /> --- ### Q4: How Do Actions Differ from Tools? What is the key difference between Actions and Tools? <Question choices={[ { text: "Actions are the steps the Agent takes, while Tools are external resources the Agent can use to perform those actions.", explain: "Actions are higher-level objectives, while Tools are specific functions the Agent can call upon.", correct: true }, { text: "Actions and Tools are entirely identical components that can be used interchangeably, with no clear differences between them.", explain: "No, Actions are goals or tasks, while Tools are specific utilities the Agent uses to achieve them.", }, { text: "Tools are considered broad utilities available for various functions, whereas Actions are mistakenly thought to be restricted only to physical interactions.", explain: "Not necessarily. Actions can involve both digital and physical tasks.", }, { text: "Actions inherently require the use of LLMs to be determined and executed, whereas Tools are designed to function autonomously without such dependencies.", explain: "While LLMs help decide Actions, Actions themselves are not dependent on LLMs.", } ]} /> --- ### Q5: What Role Do Large Language Models (LLMs) Play in Agents? How do LLMs contribute to an Agent’s functionality? 
<Question choices={[ { text: "LLMs function merely as passive repositories that store information, lacking any capability to actively process input or produce dynamic responses.", explain: "LLMs actively process text input and generate responses, rather than just storing information.", }, { text: "LLMs serve as the reasoning 'brain' of the Agent, processing text inputs to understand instructions and plan actions.", explain: "LLMs enable the Agent to interpret, plan, and decide on the next steps.", correct: true }, { text: "LLMs are erroneously believed to be used solely for image processing, when in fact their primary function is to process and generate text.", explain: "LLMs primarily work with text, although they can sometimes interact with multimodal inputs.", }, { text: "LLMs are considered completely irrelevant to the operation of AI Agents, implying that they are entirely superfluous in any practical application.", explain: "LLMs are a core component of modern AI Agents.", } ]} /> --- ### Q6: Which of the Following Best Demonstrates an AI Agent? Which real-world example best illustrates an AI Agent at work? 
<Question choices={[ { text: "A static FAQ page on a website that provides fixed information and lacks any interactive or dynamic response capabilities.", explain: "A static FAQ page does not interact dynamically with users or take actions.", }, { text: "A simple calculator that performs arithmetic operations based on fixed rules, without any capability for reasoning or planning.", explain: "A calculator follows fixed rules without reasoning or planning, so it is not an Agent.", }, { text: "A virtual assistant like Siri or Alexa that can understand spoken commands, reason through them, and perform tasks like setting reminders or sending messages.", explain: "This example includes reasoning, planning, and interaction with the environment.", correct: true }, { text: "A video game NPC that operates on a fixed script of responses, without the ability to reason, plan, or use external tools.", explain: "Unless the NPC can reason, plan, and use tools, it does not function as an AI Agent.", } ]} /> --- Congrats on finishing this Quiz 🥳! If you need to review any elements, take the time to revisit the chapter to reinforce your knowledge before diving deeper into the "Agent's brain": LLMs.
agents-course/units/en/unit1/quiz1.mdx/0
{ "file_path": "agents-course/units/en/unit1/quiz1.mdx", "repo_id": "agents-course", "token_count": 1562 }
1
# Using Agents in LlamaIndex

Remember Alfred, our helpful butler agent from earlier? Well, he's about to get an upgrade!
Now that we understand the tools available in LlamaIndex, we can give Alfred new capabilities to serve us better.

But before we continue, let's remind ourselves what makes an agent like Alfred tick.
Back in Unit 1, we learned that:

> An Agent is a system that leverages an AI model to interact with its environment to achieve a user-defined objective. It combines reasoning, planning, and action execution (often via external tools) to fulfil tasks.

LlamaIndex supports **three main types of reasoning agents:**

![Agents](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/llama-index/agents.png)

1. `Function Calling Agents` - These work with AI models that can call specific functions.
2. `ReAct Agents` - These can work with any LLM that exposes a chat or text completion endpoint, and can handle complex reasoning tasks.
3. `Advanced Custom Agents` - These use more complex methods to deal with more complex tasks and workflows.

<Tip>Find more information on advanced agents on <a href="https://github.com/run-llama/llama_index/blob/main/llama-index-core/llama_index/core/agent/workflow/base_agent.py">BaseWorkflowAgent</a></Tip>

## Initialising Agents

<Tip>
You can follow the code in <a href="https://huggingface.co/agents-course/notebooks/blob/main/unit2/llama-index/agents.ipynb" target="_blank">this notebook</a> that you can run using Google Colab.
</Tip>

To create an agent, we start by providing it with a **set of functions/tools that define its capabilities**. Let's look at how to create an agent with some basic tools. As of this writing, the agent will automatically use the function calling API (if available), or a standard ReAct agent loop.

LLMs that support a tools/functions API are relatively new, but they provide a powerful way to call tools by avoiding specific prompting and allowing the LLM to create tool calls based on provided schemas.
ReAct agents are also good at complex reasoning tasks and can work with any LLM that has chat or text completion capabilities. They are more verbose, and show the reasoning behind certain actions that they take. ```python from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI from llama_index.core.agent.workflow import AgentWorkflow from llama_index.core.tools import FunctionTool # define sample Tool -- type annotations, function names, and docstrings, are all included in parsed schemas! def multiply(a: int, b: int) -> int: """Multiplies two integers and returns the resulting integer""" return a * b # initialize llm llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct") # initialize agent agent = AgentWorkflow.from_tools_or_functions( [FunctionTool.from_defaults(multiply)], llm=llm ) ``` **Agents are stateless by default**, however, they can remember past interactions using a `Context` object. This might be useful if you want to use an agent that needs to remember previous interactions, like a chatbot that maintains context across multiple messages or a task manager that needs to track progress over time. ```python # stateless response = await agent.run("What is 2 times 2?") # remembering state from llama_index.core.workflow import Context ctx = Context(agent) response = await agent.run("My name is Bob.", ctx=ctx) response = await agent.run("What was my name again?", ctx=ctx) ``` You'll notice that agents in `LlamaIndex` are async because they use Python's `await` operator. If you are new to async code in Python, or need a refresher, they have an [excellent async guide](https://docs.llamaindex.ai/en/stable/getting_started/async_python/). Now we've gotten the basics, let's take a look at how we can use more complex tools in our agents. ## Creating RAG Agents with QueryEngineTools **Agentic RAG is a powerful way to use agents to answer questions about your data.** We can pass various tools to Alfred to help him answer questions. 
However, instead of answering the question on top of documents automatically, Alfred can decide to use any other tool or flow to answer the question. ![Agentic RAG](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/llama-index/agentic-rag.png) It is easy to **wrap `QueryEngine` as a tool** for an agent. When doing so, we need to **define a name and description**. The LLM will use this information to correctly use the tool. Let's see how to load in a `QueryEngineTool` using the `QueryEngine` we created in the [component section](components). ```python from llama_index.core.tools import QueryEngineTool query_engine = index.as_query_engine(llm=llm, similarity_top_k=3) # as shown in the Components in LlamaIndex section query_engine_tool = QueryEngineTool.from_defaults( query_engine=query_engine, name="name", description="a specific description", return_direct=False, ) query_engine_agent = AgentWorkflow.from_tools_or_functions( [query_engine_tool], llm=llm, system_prompt="You are a helpful assistant that has access to a database containing persona descriptions. " ) ``` ## Creating Multi-agent systems The `AgentWorkflow` class also directly supports multi-agent systems. By giving each agent a name and description, the system maintains a single active speaker, with each agent having the ability to hand off to another agent. By narrowing the scope of each agent, we can help increase their general accuracy when responding to user messages. **Agents in LlamaIndex can also directly be used as tools** for other agents, for more complex and custom scenarios. ```python from llama_index.core.agent.workflow import ( AgentWorkflow, FunctionAgent, ReActAgent, ) # Define some tools def add(a: int, b: int) -> int: """Add two numbers.""" return a + b def subtract(a: int, b: int) -> int: """Subtract two numbers.""" return a - b # Create agent configs # NOTE: we can use FunctionAgent or ReActAgent here. 
# FunctionAgent works for LLMs with a function calling API. # ReActAgent works for any LLM. calculator_agent = ReActAgent( name="calculator", description="Performs basic arithmetic operations", system_prompt="You are a calculator assistant. Use your tools for any math operation.", tools=[add, subtract], llm=llm, ) query_agent = ReActAgent( name="info_lookup", description="Looks up information about XYZ", system_prompt="Use your tool to query a RAG system to answer information about XYZ", tools=[query_engine_tool], llm=llm ) # Create and run the workflow agent = AgentWorkflow( agents=[calculator_agent, query_agent], root_agent="calculator" ) # Run the system response = await agent.run(user_msg="Can you add 5 and 3?") ``` <Tip>Haven't learned enough yet? There is a lot more to discover about agents and tools in LlamaIndex within the <a href="https://docs.llamaindex.ai/en/stable/examples/agent/agent_workflow_basic/">AgentWorkflow Basic Introduction</a> or the <a href="https://docs.llamaindex.ai/en/stable/understanding/agent/">Agent Learning Guide</a>, where you can read more about streaming, context serialization, and human-in-the-loop!</Tip> Now that we understand the basics of agents and tools in LlamaIndex, let's see how we can use LlamaIndex to **create configurable and manageable workflows!**
agents-course/units/en/unit2/llama-index/agents.mdx/0
{ "file_path": "agents-course/units/en/unit2/llama-index/agents.mdx", "repo_id": "agents-course", "token_count": 2112 }
2
<CourseFloatingBanner classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/#fileId=https://huggingface.co/agents-course/notebooks/blob/main/unit2/smolagents/retrieval_agents.ipynb"}, ]} askForHelpUrl="http://hf.co/join/discord" /> # Building Agentic RAG Systems <Tip> You can follow the code in <a href="https://huggingface.co/agents-course/notebooks/blob/main/unit2/smolagents/retrieval_agents.ipynb" target="_blank">this notebook</a> that you can run using Google Colab. </Tip> Retrieval Augmented Generation (RAG) systems combine the capabilities of data retrieval and generation models to provide context-aware responses. For example, a user's query is passed to a search engine, and the retrieved results are given to the model along with the query. The model then generates a response based on the query and retrieved information. Agentic RAG (Retrieval-Augmented Generation) extends traditional RAG systems by **combining autonomous agents with dynamic knowledge retrieval**. While traditional RAG systems use an LLM to answer queries based on retrieved data, agentic RAG **enables intelligent control of both retrieval and generation processes**, improving efficiency and accuracy. Traditional RAG systems face key limitations, such as **relying on a single retrieval step** and focusing on direct semantic similarity with the user’s query, which may overlook relevant information. Agentic RAG addresses these issues by allowing the agent to autonomously formulate search queries, critique retrieved results, and conduct multiple retrieval steps for a more tailored and comprehensive output. ## Basic Retrieval with DuckDuckGo Let's build a simple agent that can search the web using DuckDuckGo. This agent will retrieve information and synthesize responses to answer queries. 
With Agentic RAG, Alfred's agent can: * Search for latest superhero party trends * Refine results to include luxury elements * Synthesize information into a complete plan Here's how Alfred's agent can achieve this: ```python from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel # Initialize the search tool search_tool = DuckDuckGoSearchTool() # Initialize the model model = InferenceClientModel() agent = CodeAgent( model=model, tools=[search_tool], ) # Example usage response = agent.run( "Search for luxury superhero-themed party ideas, including decorations, entertainment, and catering." ) print(response) ``` The agent follows this process: 1. **Analyzes the Request:** Alfred’s agent identifies the key elements of the query—luxury superhero-themed party planning, with focus on decor, entertainment, and catering. 2. **Performs Retrieval:** The agent leverages DuckDuckGo to search for the most relevant and up-to-date information, ensuring it aligns with Alfred’s refined preferences for a luxurious event. 3. **Synthesizes Information:** After gathering the results, the agent processes them into a cohesive, actionable plan for Alfred, covering all aspects of the party. 4. **Stores for Future Reference:** The agent stores the retrieved information for easy access when planning future events, optimizing efficiency in subsequent tasks. ## Custom Knowledge Base Tool For specialized tasks, a custom knowledge base can be invaluable. Let's create a tool that queries a vector database of technical documentation or specialized knowledge. Using semantic search, the agent can find the most relevant information for Alfred's needs. A vector database stores numerical representations (embeddings) of text or other data, created by machine learning models. It enables semantic search by identifying similar meanings in high-dimensional space. This approach combines predefined knowledge with semantic search to provide context-aware solutions for event planning. 
With specialized knowledge access, Alfred can perfect every detail of the party. In this example, we'll create a tool that retrieves party planning ideas from a custom knowledge base. We'll use a BM25 retriever to search the knowledge base and return the top results, and `RecursiveCharacterTextSplitter` to split the documents into smaller chunks for more efficient search. ```python from langchain.docstore.document import Document from langchain.text_splitter import RecursiveCharacterTextSplitter from smolagents import Tool from langchain_community.retrievers import BM25Retriever from smolagents import CodeAgent, InferenceClientModel class PartyPlanningRetrieverTool(Tool): name = "party_planning_retriever" description = "Uses semantic search to retrieve relevant party planning ideas for Alfred’s superhero-themed party at Wayne Manor." inputs = { "query": { "type": "string", "description": "The query to perform. This should be a query related to party planning or superhero themes.", } } output_type = "string" def __init__(self, docs, **kwargs): super().__init__(**kwargs) self.retriever = BM25Retriever.from_documents( docs, k=5 # Retrieve the top 5 documents ) def forward(self, query: str) -> str: assert isinstance(query, str), "Your search query must be a string" docs = self.retriever.invoke( query, ) return "\nRetrieved ideas:\n" + "".join( [ f"\n\n===== Idea {str(i)} =====\n" + doc.page_content for i, doc in enumerate(docs) ] ) # Simulate a knowledge base about party planning party_ideas = [ {"text": "A superhero-themed masquerade ball with luxury decor, including gold accents and velvet curtains.", "source": "Party Ideas 1"}, {"text": "Hire a professional DJ who can play themed music for superheroes like Batman and Wonder Woman.", "source": "Entertainment Ideas"}, {"text": "For catering, serve dishes named after superheroes, like 'The Hulk's Green Smoothie' and 'Iron Man's Power Steak.'", "source": "Catering Ideas"}, {"text": "Decorate with iconic superhero logos 
and projections of Gotham and other superhero cities around the venue.", "source": "Decoration Ideas"}, {"text": "Interactive experiences with VR where guests can engage in superhero simulations or compete in themed games.", "source": "Entertainment Ideas"} ] source_docs = [ Document(page_content=doc["text"], metadata={"source": doc["source"]}) for doc in party_ideas ] # Split the documents into smaller chunks for more efficient search text_splitter = RecursiveCharacterTextSplitter( chunk_size=500, chunk_overlap=50, add_start_index=True, strip_whitespace=True, separators=["\n\n", "\n", ".", " ", ""], ) docs_processed = text_splitter.split_documents(source_docs) # Create the retriever tool party_planning_retriever = PartyPlanningRetrieverTool(docs_processed) # Initialize the agent agent = CodeAgent(tools=[party_planning_retriever], model=InferenceClientModel()) # Example usage response = agent.run( "Find ideas for a luxury superhero-themed party, including entertainment, catering, and decoration options." ) print(response) ``` This enhanced agent can: 1. First check the documentation for relevant information 2. Combine insights from the knowledge base 3. Maintain conversation context in memory ## Enhanced Retrieval Capabilities When building agentic RAG systems, the agent can employ sophisticated strategies like: 1. **Query Reformulation:** Instead of using the raw user query, the agent can craft optimized search terms that better match the target documents 2. **Query Decomposition:** Instead of using the user query directly, if it contains multiple pieces of information to query, it can be decomposed to multiple queries 3. **Query Expansion:** Somehow similar to Query Reformulation but done multiple times to put the query in multiple wordings to query them all 4. **Reranking:** Using Cross-Encoders to assign more comprehensive and semantic relevance scores between retrieved documents and search query 5. 
**Multi-Step Retrieval:** The agent can perform multiple searches, using initial results to inform subsequent queries 6. **Source Integration:** Information can be combined from multiple sources like web search and local documentation 7. **Result Validation:** Retrieved content can be analyzed for relevance and accuracy before being included in responses Effective agentic RAG systems require careful consideration of several key aspects. The agent **should select between available tools based on the query type and context**. Memory systems help maintain conversation history and avoid repetitive retrievals. Having fallback strategies ensures the system can still provide value even when primary retrieval methods fail. Additionally, implementing validation steps helps ensure the accuracy and relevance of retrieved information. ## Resources - [Agentic RAG: turbocharge your RAG with query reformulation and self-query! 🚀](https://huggingface.co/learn/cookbook/agent_rag) - Recipe for developing an Agentic RAG system using smolagents.
agents-course/units/en/unit2/smolagents/retrieval_agents.mdx/0
{ "file_path": "agents-course/units/en/unit2/smolagents/retrieval_agents.mdx", "repo_id": "agents-course", "token_count": 2404 }
3
# Welcome to the final Unit [[introduction]] <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit4/thumbnail.jpg" alt="AI Agents Course thumbnail" width="100%"/> Welcome to the final unit of the course! 🎉 So far, you’ve **built a strong foundation in AI Agents**, from understanding their components to creating your own. With this knowledge, you’re now ready to **build powerful agents** and stay up-to-date with the latest advancements in this fast-evolving field. This unit is all about applying what you’ve learned. It’s your **final hands-on project**, and completing it is your ticket to earning the **course certificate**. ## What’s the challenge? You’ll create your own agent and **evaluate its performance using a subset of the [GAIA benchmark](https://huggingface.co/spaces/gaia-benchmark/leaderboard)**. To successfully complete the course, your agent needs to score **30% or higher** on the benchmark. Achieve that, and you’ll earn your **Certificate of Completion**, officially recognizing your expertise. 🏅 Additionally, see how you stack up against your peers! A dedicated **[Student Leaderboard](https://huggingface.co/spaces/agents-course/Students_leaderboard)** is available for you to submit your scores and see the community's progress. > ** 🚨 Heads Up: Advanced & Hands-On Unit** > > Please be aware that this unit shifts towards a more practical, hands-on approach. Success in this section will require **more advanced coding knowledge** and relies on you navigating tasks with **less explicit guidance** compared to earlier parts of the course. Sounds exciting? Let’s get started! 🚀
agents-course/units/en/unit4/introduction.mdx/0
{ "file_path": "agents-course/units/en/unit4/introduction.mdx", "repo_id": "agents-course", "token_count": 433 }
4
# El Estado del Arte en el Uso de LLMs en Juegos Para darte una idea de cuánto se ha avanzado en este campo, examinemos tres demos tecnológicas y un juego publicado que muestran la integración de LLMs en los videojuegos. ## 🕵️‍♂️ Covert Protocol por NVIDIA e Inworld AI <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit3/covert-protocol.jpg" alt="Covert Protocol"/> Presentado en GDC 2024, *Covert Protocol* es una demo tecnológica que te pone en la piel de un detective privado. Lo interesante de esta demo es el uso de PNJs impulsados por IA que responden a tus preguntas en tiempo real, influenciando la narrativa en función de tus interacciones. La demo está construida sobre Unreal Engine 5, aprovecha el Avatar Cloud Engine (ACE) de NVIDIA y la IA de Inworld para crear interacciones de personajes realistas. Obtén más información aquí 👉 [Blog de Inworld AI](https://inworld.ai/blog/nvidia-inworld-ai-demo-on-device-capabilities) ## 🤖 NEO NPCs por Ubisoft <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit3/neo-npc.jpeg" alt="Neo NPC"/> También en GDC 2024, Ubisoft presentó *NEO NPCs*, un prototipo que muestra PNJs impulsados por IA generativa. Estos personajes pueden percibir su entorno, recordar interacciones pasadas y entablar conversaciones significativas con los jugadores. La idea aquí es crear mundos de juego más inmersivos y receptivos donde el jugador pueda tener una verdadera interacción con los PNJs. Obtén más información aquí 👉 [Blog de Inworld AI](https://inworld.ai/blog/gdc-2024) ## ⚔️ Mecha BREAK con ACE de NVIDIA <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit3/mecha-break.jpg" alt="Mecha BREAK"/> *Mecha BREAK*, un próximo juego de batalla de mechs multijugador, integra la tecnología ACE de NVIDIA para dar vida a PNJs impulsados por IA. 
Los jugadores pueden interactuar con estos personajes usando lenguaje natural, y los PNJs pueden reconocer a los jugadores y objetos a través de la cámara web, gracias a la integración de GPT-4o. Esta innovación promete una experiencia de juego más inmersiva e interactiva. Obtén más información aquí 👉 [Blog de NVIDIA](https://blogs.nvidia.com/blog/digital-human-technology-mecha-break/) ## 🧛‍♂️ *Suck Up!* por Proxima Enterprises <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit3/suck-up.jpg" alt="Suck Up"/> Finalmente, *Suck Up!* es un juego publicado en el que juegas como un vampiro que intenta entrar en las casas **convenciendo a PNJs impulsados por IA para que te inviten a pasar.** Cada personaje es impulsado por IA generativa, lo que permite interacciones dinámicas e impredecibles. Obtén más información aquí 👉 [Sitio Web Oficial de Suck Up!](https://www.playsuckup.com/) ## Espera… ¿Dónde están los Agentes? Después de explorar estas demos, podrías preguntarte: "Estos ejemplos muestran el uso de LLMs en juegos, pero no parecen involucrar Agentes. Entonces, ¿cuál es la distinción y qué capacidades adicionales aportan los Agentes?” No te preocupes, es lo que estudiaremos en la siguiente sección.
agents-course/units/es/bonus-unit3/state-of-art.mdx/0
{ "file_path": "agents-course/units/es/bonus-unit3/state-of-art.mdx", "repo_id": "agents-course", "token_count": 1184 }
5
# Pensamiento: Razonamiento Interno y el Enfoque Re-Act <Tip> En esta sección, profundizamos en el funcionamiento interno de un agente de IA—su capacidad para razonar y planificar. Exploraremos cómo el agente aprovecha su diálogo interno para analizar información, desglosar problemas complejos en pasos manejables y decidir qué acción tomar a continuación. Además, presentamos el enfoque Re-Act, una técnica de prompting que anima al modelo a pensar "paso a paso" antes de actuar. </Tip> Los pensamientos representan los **procesos internos de razonamiento y planificación del Agente** para resolver la tarea. Esto utiliza la capacidad del Modelo de Lenguaje Grande (LLM) del agente **para analizar información cuando se presenta en su prompt**. Piénsalo como el diálogo interno del agente, donde considera la tarea en cuestión y forma la estrategia de su enfoque. Los pensamientos del Agente son responsables de acceder a las observaciones actuales y decidir cuál(es) debería(n) ser la(s) siguiente(s) acción(es). A través de este proceso, el agente puede **desglosar problemas complejos en pasos más pequeños y manejables**, reflexionar sobre experiencias pasadas y ajustar continuamente sus planes basándose en nueva información. 
Aquí hay algunos ejemplos de pensamientos comunes: | Tipo de Pensamiento | Ejemplo | |----------------|---------| | Planificación | "Necesito dividir esta tarea en tres pasos: 1) recopilar datos, 2) analizar tendencias, 3) generar informe" | | Análisis | "Basado en el mensaje de error, el problema parece estar en los parámetros de conexión de la base de datos" | | Toma de Decisiones | "Dadas las restricciones presupuestarias del usuario, debería recomendar la opción de nivel medio" | | Resolución de Problemas | "Para optimizar este código, primero debería perfilarlo para identificar cuellos de botella" | | Integración de Memoria | "El usuario mencionó su preferencia por Python anteriormente, así que proporcionaré ejemplos en Python" | | Auto-reflexión | "Mi último enfoque no funcionó bien, debería probar una estrategia diferente" | | Establecimiento de Objetivos | "Para completar esta tarea, primero necesito establecer los criterios de aceptación" | | Priorización | "La vulnerabilidad de seguridad debe abordarse antes de agregar nuevas características" | > **Nota:** En el caso de LLMs afinados para llamadas a funciones, el proceso de pensamiento es opcional. > *En caso de que no estés familiarizado con las llamadas a funciones, habrá más detalles en la sección de Acciones.* ## El Enfoque Re-Act Un método clave es el **enfoque ReAct**, que es la concatenación de "Razonamiento" (Think) con "Actuar" (Act). ReAct es una técnica de prompting simple que añade "Pensemos paso a paso" antes de permitir que el LLM decodifique los siguientes tokens. De hecho, indicar al modelo que piense "paso a paso" fomenta el proceso de decodificación hacia los siguientes tokens **que generan un plan**, en lugar de una solución final, ya que se anima al modelo a **descomponer** el problema en *sub-tareas*. Esto permite que el modelo considere los sub-pasos con más detalle, lo que en general conduce a menos errores que intentar generar la solución final directamente. 
<figure> <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/ReAct.png" alt="ReAct"/> <figcaption>El (d) es un ejemplo del enfoque Re-Act donde indicamos "Pensemos paso a paso" </figcaption> </figure> <Tip> Recientemente hemos visto mucho interés por las estrategias de razonamiento. Esto es lo que está detrás de modelos como Deepseek R1 o o1 de OpenAI, que han sido afinados para "pensar antes de responder". Estos modelos han sido entrenados para incluir siempre secciones específicas de _pensamiento_ (encerradas entre tokens especiales `<think>` y `</think>`). Esto no es solo una técnica de prompting como ReAct, sino un método de entrenamiento donde el modelo aprende a generar estas secciones después de analizar miles de ejemplos que muestran lo que esperamos que haga. </Tip> --- Ahora que entendemos mejor el proceso de Pensamiento, profundicemos en la segunda parte del proceso: Actuar.
agents-course/units/es/unit1/thoughts.mdx/0
{ "file_path": "agents-course/units/es/unit1/thoughts.mdx", "repo_id": "agents-course", "token_count": 1519 }
6
# Conclusión ¡Felicidades por terminar el módulo `llama-index` de esta segunda Unidad! 🥳 Acabas de dominar los fundamentos de `llama-index` y has visto cómo construir tus propios flujos de trabajo agentivos. Ahora que tienes habilidades en `llama-index`, puedes empezar a crear motores de búsqueda que resolverán tareas que te interesen. En el próximo módulo de la Unidad, aprenderás **cómo construir Agentes con LangGraph**. Finalmente, nos encantaría **saber qué te parece el curso y cómo podemos mejorar**. Si tienes alguna retroalimentación, por favor 👉 llena este formulario: [https://docs.google.com/forms/d/e/1FAIpQLSe9VaONn0eglax0uTwi29rIn4tM7H2sYmmybmG5jJNlE5v0xA/viewform?usp=dialog](https://docs.google.com/forms/d/e/1FAIpQLSe9VaONn0eglax0uTwi29rIn4tM7H2sYmmybmG5jJNlE5v0xA/viewform?usp=dialog) ### Sigue aprendiendo, y sigue siendo increíble 🤗
agents-course/units/es/unit2/llama-index/conclusion.mdx/0
{ "file_path": "agents-course/units/es/unit2/llama-index/conclusion.mdx", "repo_id": "agents-course", "token_count": 377 }
7
<CourseFloatingBanner chapter={2} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/#fileId=https://huggingface.co/agents-course/notebooks/blob/main/unit2/smolagents/tools.ipynb"}, ]} /> # Herramientas Como exploramos en la [unidad 1](https://huggingface.co/learn/agents-course/unit1/tools), los agentes utilizan herramientas para realizar diversas acciones. En `smolagents`, las herramientas son tratadas como **funciones que un LLM puede llamar dentro de un sistema de agentes**. Para interactuar con una herramienta, el LLM necesita una **descripción de la interfaz** con estos componentes clave: - **Nombre**: Cómo se llama la herramienta - **Descripción de la herramienta**: Qué hace la herramienta - **Tipos de entrada y descripciones**: Qué argumentos acepta la herramienta - **Tipo de salida**: Qué devuelve la herramienta Por ejemplo, mientras prepara una fiesta en la Mansión Wayne, Alfred necesita varias herramientas para recopilar información - desde buscar servicios de catering hasta encontrar ideas para temas de fiesta. Así es como podría verse la interfaz de una herramienta de búsqueda simple: - **Nombre:** `web_search` - **Descripción de la herramienta:** Busca en la web consultas específicas - **Entrada:** `query` (cadena) - El término de búsqueda a consultar - **Salida:** Cadena que contiene los resultados de la búsqueda Al utilizar estas herramientas, Alfred puede tomar decisiones informadas y recopilar toda la información necesaria para planificar la fiesta perfecta. A continuación, puedes ver una animación que ilustra cómo se gestiona una llamada a una herramienta: ![Pipeline de agente de https://huggingface.co/docs/smolagents/conceptual_guides/react](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Agent_ManimCE.gif) ## Métodos de Creación de Herramientas En `smolagents`, las herramientas pueden definirse de dos maneras: 1. 
**Usando el decorador `@tool`** para herramientas simples basadas en funciones 2. **Creando una subclase de `Tool`** para funcionalidades más complejas ### El Decorador `@tool` El decorador `@tool` es **la forma recomendada para definir herramientas simples**. Internamente, smolagents analizará la información básica sobre la función desde Python. Por lo tanto, si nombras tu función claramente y escribes un buen docstring, será más fácil para el LLM utilizarla. Usando este enfoque, definimos una función con: - **Un nombre de función claro y descriptivo** que ayuda al LLM a entender su propósito. - **Anotaciones de tipo tanto para entradas como para salidas** para garantizar un uso adecuado. - **Una descripción detallada**, que incluye una sección `Args:` donde cada argumento se describe explícitamente. Estas descripciones proporcionan un contexto valioso para el LLM, por lo que es importante escribirlas cuidadosamente. #### Generando una herramienta que recupera el servicio de catering mejor valorado <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/smolagents/alfred-catering.jpg" alt="Alfred Catering"/> <Tip> Puedes seguir el código en <a href="https://huggingface.co/agents-course/notebooks/blob/main/unit2/smolagents/tools.ipynb" target="_blank">este notebook</a> que puedes ejecutar usando Google Colab. </Tip> Imaginemos que Alfred ya ha decidido el menú para la fiesta, pero ahora necesita ayuda para preparar comida para un número tan grande de invitados. Para hacerlo, le gustaría contratar un servicio de catering y necesita identificar las opciones mejor valoradas disponibles. Alfred puede aprovechar una herramienta para buscar los mejores servicios de catering en su área. 
A continuación se muestra un ejemplo de cómo Alfred puede usar el decorador `@tool` para lograrlo: ```python from smolagents import CodeAgent, InferenceClientModel, tool # Imaginemos que tenemos una función que obtiene los servicios de catering mejor valorados. @tool def catering_service_tool(query: str) -> str: """ Esta herramienta devuelve el servicio de catering mejor valorado en Ciudad Gótica. Args: query: Un término de búsqueda para encontrar servicios de catering. """ # Lista de ejemplo de servicios de catering y sus calificaciones services = { "Gotham Catering Co.": 4.9, "Wayne Manor Catering": 4.8, "Gotham City Events": 4.7, } # Encuentra el servicio de catering mejor valorado (simulando el filtrado de consultas de búsqueda) best_service = max(services, key=services.get) return best_service agent = CodeAgent(tools=[catering_service_tool], model=InferenceClientModel()) # Ejecuta el agente para encontrar el mejor servicio de catering result = agent.run( "¿Puedes darme el nombre del servicio de catering mejor valorado en Ciudad Gótica?" ) print(result) # Salida: Gotham Catering Co. ``` ### Definiendo una Herramienta como una Clase de Python Este enfoque implica crear una subclase de [`Tool`](https://huggingface.co/docs/smolagents/v1.8.1/en/reference/tools#smolagents.Tool). Para herramientas complejas, podemos implementar una clase en lugar de una función de Python. La clase envuelve la función con metadatos que ayudan al LLM a entender cómo usarla de manera efectiva. En esta clase, definimos: - `name`: El nombre de la herramienta. - `description`: Una descripción utilizada para completar el prompt del sistema del agente. - `inputs`: Un diccionario con claves `type` y `description`, proporcionando información para ayudar al intérprete de Python a procesar las entradas. - `output_type`: Especifica el tipo de salida esperado. - `forward`: El método que contiene la lógica de inferencia a ejecutar. 
A continuación, podemos ver un ejemplo de una herramienta construida usando `Tool` y cómo integrarla dentro de un `CodeAgent`. #### Generando una herramienta para generar ideas sobre la fiesta temática de superhéroes La fiesta de Alfred en la mansión es un **evento temático de superhéroes**, pero necesita algunas ideas creativas para hacerla verdaderamente especial. Como anfitrión fantástico, quiere sorprender a los invitados con un tema único. Para hacer esto, puede usar un agente que genere ideas de fiestas temáticas de superhéroes basadas en una categoría dada. De esta manera, Alfred puede encontrar el tema de fiesta perfecto para impresionar a sus invitados. ```python from smolagents import Tool, CodeAgent, InferenceClientModel class SuperheroPartyThemeTool(Tool): name = "superhero_party_theme_generator" description = """ Esta herramienta sugiere ideas creativas para fiestas temáticas de superhéroes basadas en una categoría. Devuelve una idea única de tema para la fiesta.""" inputs = { "category": { "type": "string", "description": "El tipo de fiesta de superhéroes (por ejemplo, 'héroes clásicos', 'mascarada de villanos', 'Gotham futurista').", } } output_type = "string" def forward(self, category: str): themes = { "classic heroes": "Gala de la Liga de la Justicia: Los invitados vienen vestidos como sus héroes favoritos de DC con cócteles temáticos como 'El Ponche de Kryptonita'.", "villain masquerade": "Baile de los Pícaros de Gotham: Una mascarada misteriosa donde los invitados se visten como villanos clásicos de Batman.", "futuristic Gotham": "Noche Neo-Gotham: Una fiesta de estilo cyberpunk inspirada en Batman Beyond, con decoraciones de neón y gadgets futuristas." } return themes.get(category.lower(), "Idea de fiesta temática no encontrada. 
Prueba con 'héroes clásicos', 'mascarada de villanos' o 'Gotham futurista'.") # Instancia la herramienta party_theme_tool = SuperheroPartyThemeTool() agent = CodeAgent(tools=[party_theme_tool], model=InferenceClientModel()) # Ejecuta el agente para generar una idea de tema para la fiesta result = agent.run( "¿Cuál sería una buena idea para una fiesta de superhéroes con el tema 'mascarada de villanos'?" ) print(result) # Salida: "Baile de los Pícaros de Gotham: Una mascarada misteriosa donde los invitados se visten como villanos clásicos de Batman." ``` Con esta herramienta, ¡Alfred será el mejor anfitrión, impresionando a sus invitados con una fiesta temática de superhéroes que no olvidarán! 🦸‍♂️🦸‍♀️ ## Caja de Herramientas Predeterminada `smolagents` viene con un conjunto de herramientas preintegradas que pueden inyectarse directamente en tu agente. La [caja de herramientas predeterminada](https://huggingface.co/docs/smolagents/guided_tour?build-a-tool=Decorate+a+function+with+%40tool#default-toolbox) incluye: - **PythonInterpreterTool** - **FinalAnswerTool** - **UserInputTool** - **DuckDuckGoSearchTool** - **GoogleSearchTool** - **VisitWebpageTool** Alfred podría usar varias herramientas para asegurar una fiesta impecable en la Mansión Wayne: - Primero, podría usar la `DuckDuckGoSearchTool` para encontrar ideas creativas para fiestas temáticas de superhéroes. - Para el catering, confiaría en la `GoogleSearchTool` para encontrar los servicios mejor valorados en Gotham. - Para gestionar la distribución de asientos, Alfred podría realizar cálculos con la `PythonInterpreterTool`. - Una vez recopilado todo, compilaría el plan usando la `FinalAnswerTool`. Con estas herramientas, Alfred garantiza que la fiesta sea excepcional e impecable. 🦇💡 ## Compartir e Importar Herramientas Una de las características más poderosas de **smolagents** es su capacidad para compartir herramientas personalizadas en el Hub e integrar perfectamente herramientas creadas por la comunidad. 
Esto incluye la conexión con **HF Spaces** y **herramientas de LangChain**, mejorando significativamente la capacidad de Alfred para organizar una fiesta inolvidable en la Mansión Wayne. 🎭 Con estas integraciones, Alfred puede aprovechar herramientas avanzadas de planificación de eventos, ya sea ajustar la iluminación para el ambiente perfecto, seleccionar la lista de reproducción ideal para la fiesta, o coordinar con los mejores servicios de catering de Gotham. Aquí hay ejemplos que muestran cómo estas funcionalidades pueden elevar la experiencia de la fiesta: ### Compartir una Herramienta en el Hub ¡Compartir tu herramienta personalizada con la comunidad es fácil! Simplemente súbela a tu cuenta de Hugging Face usando el método `push_to_hub()`. Por ejemplo, Alfred puede compartir su `party_theme_tool` para ayudar a otros a encontrar los mejores servicios de catering en Gotham. Así es cómo hacerlo: ```python party_theme_tool.push_to_hub("{tu_nombre_de_usuario}/party_theme_tool", token="<TU_TOKEN_API_HUGGINGFACEHUB>") ``` ### Importar una Herramienta desde el Hub Puedes importar fácilmente herramientas creadas por otros usuarios usando la función `load_tool()`. Por ejemplo, Alfred podría querer generar una imagen promocional para la fiesta usando IA. En lugar de construir una herramienta desde cero, puede aprovechar una predefinida de la comunidad: ```python from smolagents import load_tool, CodeAgent, InferenceClientModel image_generation_tool = load_tool( "m-ric/text-to-image", trust_remote_code=True ) agent = CodeAgent( tools=[image_generation_tool], model=InferenceClientModel() ) agent.run("Genera una imagen de una lujosa fiesta temática de superhéroes en la Mansión Wayne con superhéroes inventados.") ``` ### Importar un Hugging Face Space como Herramienta También puedes importar un HF Space como herramienta usando `Tool.from_space()`. 
Esto abre posibilidades para integrar miles de spaces de la comunidad para tareas desde generación de imágenes hasta análisis de datos. La herramienta se conectará con el backend Gradio del space usando `gradio_client`, así que asegúrate de instalarlo via `pip` si aún no lo tienes. Para la fiesta, Alfred puede usar un HF Space existente para la generación de la imagen generada por IA que se usará en el anuncio (en lugar de la herramienta preintegrada que mencionamos antes). ¡Vamos a construirla! ```python from smolagents import CodeAgent, InferenceClientModel, Tool image_generation_tool = Tool.from_space( "black-forest-labs/FLUX.1-schnell", name="image_generator", description="Generar una imagen a partir de un prompt" ) model = InferenceClientModel("Qwen/Qwen2.5-Coder-32B-Instruct") agent = CodeAgent(tools=[image_generation_tool], model=model) agent.run( "Mejora este prompt, luego genera una imagen del mismo.", additional_args={'user_prompt': 'Una gran fiesta temática de superhéroes en la Mansión Wayne, con Alfred supervisando una lujosa gala'} ) ``` ### Importar una Herramienta de LangChain Discutiremos el framework `LangChain` en las próximas secciones. Por ahora, solo notamos que ¡podemos reutilizar herramientas de LangChain en tu flujo de trabajo de smolagents! Puedes cargar fácilmente herramientas de LangChain usando el método `Tool.from_langchain()`. Alfred, siempre perfeccionista, está preparando una espectacular noche de superhéroes en la Mansión Wayne mientras los Wayne están fuera. Para asegurarse de que cada detalle supere las expectativas, aprovecha las herramientas de LangChain para encontrar ideas de entretenimiento de primera categoría. Al usar `Tool.from_langchain()`, Alfred añade sin esfuerzo funcionalidades de búsqueda avanzadas a su smolagent, permitiéndole descubrir ideas y servicios exclusivos para fiestas con solo unos pocos comandos. 
Así es como lo hace: ```python from langchain.agents import load_tools from smolagents import CodeAgent, InferenceClientModel, Tool search_tool = Tool.from_langchain(load_tools(["serpapi"])[0]) agent = CodeAgent(tools=[search_tool], model=model) agent.run("Busca ideas de entretenimiento de lujo para un evento temático de superhéroes, como actuaciones en vivo y experiencias interactivas.") ``` Con esta configuración, Alfred puede descubrir rápidamente opciones de entretenimiento lujosas, asegurando que los invitados de élite de Gotham tengan una experiencia inolvidable. ¡Esta herramienta le ayuda a organizar el evento temático de superhéroes perfecto para la Mansión Wayne! 🎉 ## Recursos - [Tutorial de Herramientas](https://huggingface.co/docs/smolagents/tutorials/tools) - Explora este tutorial para aprender a trabajar efectivamente con herramientas. - [Documentación de Herramientas](https://huggingface.co/docs/smolagents/v1.8.1/en/reference/tools) - Documentación de referencia completa sobre herramientas. - [Tour Guiado de Herramientas](https://huggingface.co/docs/smolagents/v1.8.1/en/guided_tour#tools) - Un tour guiado paso a paso para ayudarte a construir y utilizar herramientas eficientemente. - [Construyendo Agentes Efectivos](https://huggingface.co/docs/smolagents/tutorials/building_good_agents) - Una guía detallada sobre mejores prácticas para desarrollar agentes de función personalizados fiables y de alto rendimiento.
agents-course/units/es/unit2/smolagents/tools.mdx/0
{ "file_path": "agents-course/units/es/unit2/smolagents/tools.mdx", "repo_id": "agents-course", "token_count": 5480 }
8
# ¿Qué es GAIA? [GAIA](https://huggingface.co/papers/2311.12983) es un **benchmark diseñado para evaluar asistentes de IA en tareas del mundo real** que requieren una combinación de capacidades centrales, como razonamiento, comprensión multimodal, navegación web y uso competente de herramientas. Fue introducido en el artículo _"[GAIA: A Benchmark for General AI Assistants](https://huggingface.co/papers/2311.12983) (en inglés)"_. El benchmark presenta **466 preguntas cuidadosamente seleccionadas** que son **conceptualmente simples para los humanos**, pero **notablemente desafiantes para los sistemas de IA actuales**. Para ilustrar la brecha: - **Humanos**: Tasa de éxito de ~92% - **GPT-4 con plugins**: ~15% - **Deep Research (OpenAI)**: 67.36% en el conjunto de validación GAIA destaca las limitaciones actuales de los modelos de IA y proporciona un benchmark riguroso para evaluar el progreso hacia asistentes de IA verdaderamente de propósito general. ## 🌱 Principios Fundamentales de GAIA GAIA está cuidadosamente diseñado en torno a los siguientes pilares: - 🔍 **Dificultad del mundo real**: Las tareas requieren razonamiento de varios pasos, comprensión multimodal e interacción con herramientas. - 🧾 **Interpretabilidad humana**: A pesar de su dificultad para la IA, las tareas siguen siendo conceptualmente simples y fáciles de seguir para los humanos. - 🛡️ **No manipulable (Non-gameability)**: Las respuestas correctas exigen la ejecución completa de la tarea, lo que hace ineficaz el forzamiento bruto (brute-forcing). - 🧰 **Simplicidad de evaluación**: Las respuestas son concisas, factuales y sin ambigüedades, ideales para el benchmarking. ## Niveles de Dificultad Las tareas de GAIA se organizan en **tres niveles de complejidad creciente**, cada uno probando habilidades específicas: - **Nivel 1**: Requiere menos de 5 pasos y un uso mínimo de herramientas. - **Nivel 2**: Implica un razonamiento más complejo y la coordinación entre múltiples herramientas y 5-10 pasos. 
- **Nivel 3**: Exige planificación a largo plazo e integración avanzada de diversas herramientas. ![Niveles de GAIA](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit4/gaia_levels.png) ## Ejemplo de una Pregunta Difícil de GAIA > ¿Cuáles de las frutas mostradas en la pintura de 2008 "Bordado de Uzbekistán" se sirvieron como parte del menú de desayuno de octubre de 1949 para el transatlántico que luego se usó como utilería flotante para la película "El último viaje"? Enumera los elementos como una lista separada por comas, ordenándolos en el sentido de las agujas del reloj según su disposición en la pintura, comenzando desde la posición de las 12 en punto. Usa la forma plural de cada fruta. Como puedes ver, esta pregunta desafía a los sistemas de IA de varias maneras: - Requiere un **formato de respuesta estructurado** - Implica **razonamiento multimodal** (por ejemplo, analizar imágenes) - Exige **recuperación de múltiples saltos** (multi-hop retrieval) de hechos interdependientes: - Identificar las frutas en la pintura - Descubrir qué transatlántico se usó en *El último viaje* - Buscar el menú de desayuno de octubre de 1949 para ese barco - Necesita **secuenciación correcta** y planificación de alto nivel para resolver en el orden correcto Este tipo de tarea resalta dónde los LLM independientes a menudo se quedan cortos, lo que convierte a GAIA en un benchmark ideal para **sistemas basados en agentes** que pueden razonar, recuperar y ejecutar a lo largo de múltiples pasos y modalidades. ![Gráfico de capacidades de GAIA](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit4/gaia_capabilities.png) ## Evaluación en Vivo Para fomentar el benchmarking continuo, **GAIA proporciona una tabla de clasificación pública alojada en Hugging Face**, donde puedes probar tus modelos contra **300 preguntas de prueba**. 
👉 Revisa la tabla de clasificación [aquí](https://huggingface.co/spaces/gaia-benchmark/leaderboard) <iframe src="https://gaia-benchmark-leaderboard.hf.space" frameborder="0" width="850" height="450" ></iframe> ¿Quieres profundizar más en GAIA? - 📄 [Lee el artículo completo (en inglés)](https://huggingface.co/papers/2311.12983) - 📄 [Publicación de lanzamiento de Deep Research por OpenAI (en inglés)](https://openai.com/index/introducing-deep-research/) - 📄 [DeepResearch de código abierto – Liberando nuestros agentes de búsqueda (en inglés)](https://huggingface.co/blog/open-deep-research)
agents-course/units/es/unit4/what-is-gaia.mdx/0
{ "file_path": "agents-course/units/es/unit4/what-is-gaia.mdx", "repo_id": "agents-course", "token_count": 1674 }
9
# Que sont les outils ? <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/whiteboard-check-2.jpg" alt="Planification de l'Unité 1"/> Un aspect crucial des agents est leur capacité à prendre des **actions**. Comme nous l'avons vu, cela se fait par l'utilisation d'**outils**. Dans cette section, nous verrons ce que sont les outils, comment les concevoir efficacement, et comment les intégrer à votre agent via le message système. En fournissant à votre agent les bons outils — et en décrivant clairement le fonctionnement de ces outils — vous pouvez augmenter de manière spectaculaire ce que votre IA peut accomplir. Plongeons-nous dedans ! ## Que sont les outils d'IA ? Un **outil est une fonction fournie au LLM**. Cette fonction doit remplir un **objectif clair**. Voici quelques outils couramment utilisés dans les agents : | Outil | Description | |---------------------|-------------------------------------------------------------------------------------------------| | Recherche Web | Permet à l'agent de récupérer des informations à jour depuis Internet. | | Génération d'images | Crée des images à partir de descriptions textuelles. | | Recherche | Récupère des informations à partir d'une source externe. | | Interface API | Interagit avec une API externe (GitHub, YouTube, Spotify, etc.). | Ce ne sont que des exemples, car en réalité, vous pouvez créer un outil pour n'importe quel cas d'utilisation ! Un bon outil doit être quelque chose qui **complémente la puissance d'un LLM**. Par exemple, si vous devez effectuer des opérations arithmétiques, fournir une **calculatrice** à votre LLM donnera de meilleurs résultats que de se fier aux capacités natives du modèle. De plus, **les LLM prédisent la complétion du *prompt* en se basant sur leurs données d'entraînement**, ce qui signifie que leur connaissance interne n'inclut que les événements antérieurs à leur entraînement. 
Par conséquent, si votre agent a besoin de données à jour, vous devez les fournir via un outil. Par exemple, si vous demandez directement à un LLM (sans outil de recherche) la météo d'aujourd'hui, le LLM pourrait inventer une météo aléatoire.

<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/weather.jpg" alt="Météo"/>

- Un outil doit contenir :
  - Une **description textuelle de ce que fait la fonction**.
  - Un *appeleur* (quelque chose pour effectuer une action).
  - Des *arguments* avec typage.
  - (Optionnel) Des sorties avec typage.

## Comment fonctionnent les outils ?

Comme nous l'avons vu, les LLM ne peuvent recevoir que des entrées textuelles et générer des sorties textuelles. Ils ne peuvent pas appeler des outils par eux-mêmes. Lorsque nous parlons de fournir des outils à un agent, nous entendons enseigner au LLM l'existence de ces outils et lui demander de générer des invocations textuelles en cas de besoin. Par exemple, si nous fournissons un outil pour vérifier le temps qu'il fait à un endroit donné à partir d'internet et que nous demandons ensuite au LLM le temps qu'il fait à Paris, le LLM reconnaîtra qu'il s'agit d'une occasion d'utiliser l'outil « météo ».

Au lieu de récupérer les données météorologiques elles-mêmes, le LLM générera un texte pour appeler l'outil, tel que `call weather_tool("Paris")`.

L'agent lit alors cette réponse, identifie qu'un appel d'outil est nécessaire, exécute l'outil au nom du LLM et récupère les données météorologiques réelles.

Les étapes de l'appel d'outil ne sont généralement pas montrées à l'utilisateur : l'agent les ajoute à un nouveau message avant de transmettre à nouveau la conversation mise à jour au LLM. Le LLM traite alors ce contexte supplémentaire et génère une réponse naturelle pour l'utilisateur.
Du point de vue de l'utilisateur, il semble que le LLM interagisse directement avec l'outil, mais en réalité, c'est l'agent qui gère l'ensemble du processus d'exécution en arrière-plan. Nous reviendrons plus en détail sur ce processus dans les prochaines sessions. ## Comment fournir des outils à un LLM ? La réponse complète peut sembler complexe, mais nous utilisons essentiellement le *prompt* système pour fournir au modèle des descriptions textuelles des outils disponibles : <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/Agent_system_prompt.png" alt="Prompt système pour les outils"/> Pour que cela fonctionne, nous devons être très précis et rigoureux concernant : 1. **Ce que fait l'outil** 2. **Les entrées exactes qu'il attend** C'est la raison pour laquelle les descriptions d'outils sont généralement fournies en utilisant des structures expressives mais précises, telles que des langages informatiques ou du JSON. Il n'est pas _nécessaire_ de procéder ainsi, tout format précis et cohérent fonctionnerait. Si cela semble trop théorique, voyons cela à travers un exemple concret. Nous allons implémenter un outil simplifié **calculatrice** qui se contentera de multiplier deux entiers. Voici une implémentation en Python : ```python def calculator(a: int, b: int) -> int: """Multiplie deux entiers.""" return a * b print(calculator.to_string()) ``` Ainsi, notre outil s'appelle `calculator`, il **multiplie deux entiers**, et il requiert les entrées suivantes : - **`a`** (*int*): Un entier. - **`b`** (*int*): Un entier. La sortie de l'outil est un autre nombre entier que nous pouvons décrire ainsi : - (*int*): Le produit de `a` et `b`. Tous ces détails sont importants. Rassemblons-les dans une chaîne de texte qui décrit notre outil pour que le LLM puisse le comprendre. 
``` Nom de l'outil: calculator, Description: Multiplie deux entiers., Arguments: a: int, b: int, Sorties: int ``` > **Rappel :** Cette description textuelle est *ce que nous voulons que le LLM sache à propos de l'outil*. Lorsque nous passons la chaîne précédente dans l'entrée du LLM, le modèle la reconnaîtra comme un outil et saura quelles entrées fournir et ce qu'il doit attendre en sortie. Si nous souhaitons fournir des outils supplémentaires, nous devons rester cohérents et utiliser toujours le même format. Ce processus peut être fragile, et nous pourrions accidentellement négliger certains détails. Existe-t-il une meilleure méthode ? ### Sections d'auto-formatage des outils Notre outil a été écrit en Python, et l'implémentation fournit déjà tout ce dont nous avons besoin : - Un nom descriptif de ce qu'il fait : `calculator` - Une description plus détaillée, fournie par le commentaire docstring de la fonction : `Multiplie deux entiers.` - Les entrées et leur type : la fonction attend clairement deux `int`. - Le type de la sortie. Il y a une raison pour laquelle on utilise des langages de programmation : ils sont expressifs, concis et précis. Nous pourrions fournir le code source Python comme _spécification_ de l'outil pour le LLM, mais la manière dont l'outil est implémenté n'a pas d'importance. Tout ce qui compte, c'est son nom, ce qu'il fait, les entrées qu'il attend et la sortie qu'il fournit. Nous tirerons parti des fonctionnalités d'introspection de Python pour exploiter le code source et construire automatiquement une description de l'outil. Tout ce dont nous avons besoin, c'est que l'implémentation de l'outil utilise des annotations de types, des docstrings et des noms de fonction pertinents. Nous écrirons un peu de code pour extraire les parties pertinentes du code source. 
Une fois cela fait, il nous suffira d'utiliser un décorateur Python pour indiquer que la fonction `calculator` est un outil : ```python @tool def calculator(a: int, b: int) -> int: """Multiplie deux entiers.""" return a * b print(calculator.to_string()) ``` Notez le décorateur `@tool` avant la définition de la fonction. Avec l'implémentation que nous verrons ensuite, nous serons capables d'extraire automatiquement le texte suivant à partir du code source : ``` Nom de l'outil: calculator, Description: Multiplie deux entiers., Arguments: a: int, b: int, Sorties: int ``` Comme vous pouvez le constater, c'est la même chose que nous avons écrit manuellement précédemment ! ### Implémentation générique d'un outil Nous créons une classe générique `Tool` que nous pouvons réutiliser chaque fois que nous avons besoin d'utiliser un outil. > **Avertissement :** Cette implémentation à titre d'exemple est fictive mais ressemble de près aux implémentations réelles dans la plupart des bibliothèques. ```python class Tool: """ Une classe représentant un morceau de code réutilisable (Outil). Attributs: name (str): Nom de l'outil. description (str): Une description textuelle de ce que fait l'outil. func (callable): La fonction que cet outil encapsule. arguments (list): Une liste d'arguments. outputs (str ou list): Le(s) type(s) de retour de la fonction encapsulée. """ def __init__(self, name: str, description: str, func: callable, arguments: list, outputs: str): self.name = name self.description = description self.func = func self.arguments = arguments self.outputs = outputs def to_string(self) -> str: """ Retourne une représentation sous forme de chaîne de l'outil, incluant son nom, sa description, ses arguments, et ses sorties. 
""" args_str = ", ".join([ f"{arg_name}: {arg_type}" for arg_name, arg_type in self.arguments ]) return ( f"Tool Name: {self.name}," f" Description: {self.description}," f" Arguments: {args_str}," f" Outputs: {self.outputs}" ) def __call__(self, *args, **kwargs): """ Invoque la fonction sous-jacente (callable) avec les arguments fournis. """ return self.func(*args, **kwargs) ``` Cela peut sembler compliqué, mais en y allant pas à pas, nous pouvons voir ce qu'elle fait. Nous définissons une classe **`Tool`** qui inclut : - **`name`** (*str*): Le nom de l'outil. - **`description`** (*str*): Une brève description de ce que fait l'outil. - **`function`** (*callable*): La fonction que l'outil exécute. - **`arguments`** (*list*): Les paramètres d'entrée attendus. - **`outputs`** (*str* ou *list*): Les sorties attendues de l'outil. - **`__call__()`** : Appelle la fonction lorsque l'instance de l'outil est invoquée. - **`to_string()`** : Convertit les attributs de l'outil en une représentation textuelle. Nous pourrions créer un outil avec cette classe en utilisant le code suivant : ```python calculator_tool = Tool( "calculator", # nom "Multiplie deux entiers.", # description calculator, # fonction à appeler [("a", "int"), ("b", "int")], # entrées (noms et types) "int", # sortie ) ``` Mais nous pouvons également utiliser le module `inspect` de Python pour récupérer toutes les informations pour nous ! C'est ce que fait le décorateur `@tool`. > Si cela vous intéresse, vous pouvez afficher la section suivante pour voir l'implémentation du décorateur. <details> <summary>code du décorateur</summary> ```python def tool(func): """ Un décorateur qui crée une instance de Tool à partir de la fonction donnée. 
""" # Récupérer la signature de la fonction signature = inspect.signature(func) # Extraire les paires (nom_param, annotation_param) pour les entrées arguments = [] for param in signature.parameters.values(): annotation_name = ( param.annotation.__name__ if hasattr(param.annotation, '__name__') else str(param.annotation) ) arguments.append((param.name, annotation_name)) # Déterminer l'annotation de retour return_annotation = signature.return_annotation if return_annotation is inspect._empty: outputs = "Pas d'annotation de retour" else: outputs = ( return_annotation.__name__ if hasattr(return_annotation, '__name__') else str(return_annotation) ) # Utiliser la docstring de la fonction comme description (par défaut si vide) description = func.__doc__ or "No description provided." # Le nom de la fonction devient le nom de l'outil name = func.__name__ # Retourner une nouvelle instance de Tool return Tool( name=name, description=description, func=func, arguments=arguments, outputs=outputs ) ``` </details> Pour réitérer, avec ce décorateur en place, nous pouvons implémenter notre outil comme ceci : ```python @tool def calculator(a: int, b: int) -> int: """Multiplie deux entiers.""" return a * b print(calculator.to_string()) ``` Et nous pouvons utiliser la méthode `to_string` de `Tool` pour récupérer automatiquement un texte adapté à être utilisé comme description d'un outil pour un LLM : ``` Nom de l'outil: calculator, Description: Multiplie deux entiers., Arguments: a: int, b: int, Sorties: int ``` La description est **injectée** dans le *prompt* système. 
En reprenant l'exemple avec lequel nous avons commencé cette section, voici à quoi cela ressemblerait après avoir remplacé le `tools_description` : <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/Agent_system_prompt_tools.png" alt="Prompt système pour les outils"/> Dans la section sur les [actions](actions), nous en apprendrons davantage sur la façon dont un agent peut **appeler** cet outil que nous venons de créer. --- Les outils jouent un rôle crucial dans l'amélioration des capacités des agents. ### *Model Context Protocol* (MCP) : une interface d'outils unifiée *Model Context Protocol* (MCP) est un **protocole ouvert** qui standardise la manière dont les applications **fournissent des outils aux LLM**. MCP offre : - Une liste croissante d'intégrations pré-construites que votre LLM peut directement utiliser - La flexibilité de changer entre fournisseurs et vendeurs de LLM - Les meilleures pratiques pour sécuriser vos données dans votre infrastructure Cela signifie que **tout *framework* intégrant MCP peut utiliser les outils définis dans le protocole**, éliminant le besoin de réimplémenter la même interface d'outils pour chaque *framework*. Si vous voulez approfondir MCP, vous pouvez consulter notre [cours gratuit sur MCP](https://huggingface.co/learn/mcp-course/). --- Les outils jouent un rôle crucial dans l'amélioration des capacités des agents. Pour résumer, nous avons appris : - *Ce que sont les outils* : des fonctions qui offrent des capacités supplémentaires aux LLM, comme effectuer des calculs ou accéder à des données externes. - *Comment définir un outil* : en fournissant une description textuelle claire, des entrées, des sorties, et une fonction exécutable. - *Pourquoi les outils sont essentiels* : ils permettent aux agents de surmonter les limites de l'entraînement statique des modèles, de gérer des tâches en temps réel, et d'effectuer des actions spécialisées. 
Maintenant, nous pouvons passer au [*workflow* de l'agent](agent-steps-and-structure) où vous verrez comment un agent observe, réfléchit et agit. Cela **rassemble tout ce que nous avons vu jusqu'à présent** et prépare le terrain pour créer votre propre agent entièrement fonctionnel. Mais d'abord, il est temps pour un autre court quiz !
agents-course/units/fr/unit1/tools.mdx/0
{ "file_path": "agents-course/units/fr/unit1/tools.mdx", "repo_id": "agents-course", "token_count": 6144 }
10
<CourseFloatingBanner classNames="absolute z-10 right-0 top-0" notebooks={[
    {label: "Google Colab", value: "https://colab.research.google.com/#fileId=https://huggingface.co/agents-course/notebooks/blob/main/fr/unit2/smolagents/vision_agents.ipynb"},
]} askForHelpUrl="http://hf.co/join/discord" />

# Agents visuels avec smolagents

<Tip warning={true}>
Les exemples de cette section nécessitent l'accès à un modèle VLM puissant. Nous les avons testés en utilisant l'API GPT-4o. Cependant, <a href="./why_use_smolagents">Pourquoi utiliser smolagents</a> discute des solutions alternatives supportées par smolagents et Hugging Face. Si vous souhaitez explorer d'autres options, assurez-vous de consulter cette section.
</Tip>

Doter les agents de capacités visuelles est crucial pour résoudre des tâches qui vont au-delà du traitement de texte. De nombreux défis du monde réel, comme la navigation web ou la compréhension de documents, nécessitent d'analyser un contenu visuel riche. Heureusement, `smolagents` fournit un support intégré pour les modèles de vision-langage (VLM), permettant aux agents de traiter et d'interpréter efficacement les images.

Dans cet exemple, imaginez qu'Alfred soit chargé de vérifier les identités des invités assistant à la fête. Comme vous pouvez l'imaginer, Alfred pourrait ne pas être familier avec tout le monde. Pour l'aider, nous pouvons utiliser un agent qui vérifie leur identité en recherchant des informations visuelles sur leur apparence en utilisant un VLM. Cela permettra à Alfred de prendre des décisions éclairées sur qui peut entrer. Construisons cet exemple !

## Fournir des images au début de l'exécution de l'agent

<Tip>
Vous pouvez suivre le code dans <a href="https://huggingface.co/agents-course/notebooks/blob/main/fr/unit2/smolagents/vision_agents.ipynb" target="_blank">ce <i>notebook</i></a> que vous pouvez exécuter avec Google Colab.
</Tip> Dans cette approche, les images sont transmises à l'agent au début et stockées comme `task_images` avec le *prompt* de tâche. L'agent traite ensuite ces images tout au long de son exécution. Considérez le cas où Alfred veut vérifier les identités des super-héros assistant à la fête. Il a déjà un jeu de données d'images de fêtes précédentes avec les noms des invités. Étant donné l'image d'un nouveau visiteur, l'agent peut la comparer avec le jeu de données existant et prendre une décision sur leur entrée. Dans ce cas, un invité essaie d'entrer, et Alfred soupçonne que ce visiteur pourrait être le Joker se faisant passer pour Wonder Woman. Alfred doit vérifier les identités pour empêcher quiconque d'indésirable d'entrer. Construisons l'exemple. D'abord, les images sont chargées. Dans ce cas, nous utilisons des images de Wikipédia pour garder l'exemple minimaliste, mais imaginez les cas d'usage possibles ! ```python from PIL import Image import requests from io import BytesIO image_urls = [ "https://upload.wikimedia.org/wikipedia/commons/e/e8/The_Joker_at_Wax_Museum_Plus.jpg", # Image du Joker "https://upload.wikimedia.org/wikipedia/en/9/98/Joker_%28DC_Comics_character%29.jpg" # Image du Joker ] images = [] for url in image_urls: headers = { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36" } response = requests.get(url,headers=headers) image = Image.open(BytesIO(response.content)).convert("RGB") images.append(image) ``` Maintenant que nous avons les images, l'agent nous dira si un invité est vraiment un super-héros (Wonder Woman) ou un méchant (le Joker). 
```python from smolagents import CodeAgent, OpenAIServerModel model = OpenAIServerModel(model_id="gpt-4o") # Instancier l'agent agent = CodeAgent( tools=[], model=model, max_steps=20, verbosity_level=2 ) response = agent.run( """ Décrire le costume et le maquillage que porte le personnage de bande dessinée figurant sur ces photos et renvoyer la description. Indiquer si l'invité est le Joker ou Wonder Woman. """, images=images ) ``` Dans le cas de mon exécution, la sortie est la suivante, bien qu'elle puisse varier dans votre cas, comme nous l'avons déjà discuté : ```python { 'Costume et maquillage - Première image': ( 'Manteau violet et une cravate ou nœud papillon de soie violette sur une chemise jaune moutarde.', 'Peinture faciale blanche avec des traits exagérés, sourcils sombres, maquillage des yeux bleu, lèvres rouges formant un large sourire.' ), 'Costume et maquillage - Deuxième image': ( 'Costume sombre avec une fleur sur le revers, tenant une carte à jouer.', 'Peau pâle, cheveux verts, lèvres très rouges avec un sourire exagéré.' ), 'Identité du personnage': 'Ce personnage ressemble aux représentations connues du Joker des médias de bande dessinée.' } ``` Dans ce cas, la sortie révèle que la personne se fait passer pour quelqu'un d'autre, donc nous pouvons empêcher le Joker d'entrer à la fête ! ## Fournir des images avec recherche dynamique <Tip> Vous pouvez suivre le code dans <a href="https://huggingface.co/agents-course/notebooks/blob/main/unit2/smolagents/vision_web_browser.py" target="_blank">ce fichier Python</a> </Tip> L'approche précédente est précieuse et a de nombreux cas d'usage potentiels. Cependant, dans des situations où l'invité n'est pas dans la base de données, nous devons explorer d'autres façons de les identifier. Une solution possible est de récupérer dynamiquement des images et des informations à partir de sources externes, comme naviguer sur le web pour des détails. 
Dans cette approche, les images sont ajoutées dynamiquement à la mémoire de l'agent pendant l'exécution. Comme nous le savons, les agents dans `smolagents` sont basés sur la classe `MultiStepAgent`, qui est une abstraction du *framework ReAct*. Cette classe opère dans un cycle structuré où diverses variables et connaissances sont enregistrées à différentes étapes : 1. **SystemPromptStep :** Stocke le *prompt* système. 2. **TaskStep :** Enregistre la requête utilisateur et toute entrée fournie. 3. **ActionStep :** Capture les logs des actions de l'agent et les résultats. Cette approche structurée permet aux agents d'incorporer des informations visuelles dynamiquement et de répondre de manière adaptative aux tâches évolutives. Ci-dessous se trouve le diagramme que nous avons déjà vu, illustrant le processus de flux de travail dynamique et comment différentes étapes s'intègrent dans le cycle de vie de l'agent. Lors de la navigation, l'agent peut prendre des captures d'écran et les sauvegarder comme `observation_images` dans l'`ActionStep`. ![Récupération d'images dynamique](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/smolagents-can-see/diagram_adding_vlms_smolagents.png) Maintenant que nous comprenons le besoin, construisons notre exemple complet. Dans ce cas, Alfred veut un contrôle total sur le processus de vérification des invités, donc naviguer pour des détails devient une solution viable. Pour compléter cet exemple, nous avons besoin d'un nouvel ensemble d'outils pour l'agent. De plus, nous utiliserons Selenium et Helium, qui sont des outils d'automatisation de navigateur. Cela nous permettra de construire un agent qui explore le web, recherchant des détails sur un invité potentiel et récupérant des informations de vérification. 
Installons les outils nécessaires : ```bash pip install "smolagents[all]" helium selenium python-dotenv ``` Nous aurons besoin d'un ensemble d'outils d'agent spécifiquement conçus pour la navigation, tels que `search_item_ctrl_f`, `go_back` et `close_popups`. Ces outils permettent à l'agent d'agir comme une personne naviguant sur le web. ```python @tool def search_item_ctrl_f(text: str, nth_result: int = 1) -> str: """ Recherche du texte sur la page actuelle via Ctrl + F et saute à la nième occurrence. Args: text: Le texte à rechercher nth_result: Quelle occurrence aller (par défaut: 1) """ elements = driver.find_elements(By.XPATH, f"//*[contains(text(), '{text}')]") if nth_result > len(elements): raise Exception(f"Correspondance n°{nth_result} non trouvée (seulement {len(elements)} correspondances trouvées)") result = f"Trouvé {len(elements)} correspondances pour '{text}'." elem = elements[nth_result - 1] driver.execute_script("arguments[0].scrollIntoView(true);", elem) result += f"Focalisé sur l'élément {nth_result} de {len(elements)}" return result @tool def go_back() -> None: """Retourne à la page précédente.""" driver.back() @tool def close_popups() -> str: """ Ferme tout modal ou pop-up visible sur la page. Utilise ceci pour fermer les fenêtres pop-up ! Cela ne fonctionne pas sur les bannières de consentement de cookies. """ webdriver.ActionChains(driver).send_keys(Keys.ESCAPE).perform() ``` Nous avons également besoin de fonctionnalités pour sauvegarder des captures d'écran, car ce sera une partie essentielle de ce que notre agent *VLM* utilise pour accomplir la tâche. Cette fonctionnalité prend la capture d'écran et la sauvegarde dans `step_log.observations_images = [image.copy()]`, permettant à l'agent de stocker et traiter les images dynamiquement pendant qu'il navigue. 
```python def save_screenshot(step_log: ActionStep, agent: CodeAgent) -> None: sleep(1.0) # Laisser les animations JavaScript se produire avant de prendre la capture d'écran driver = helium.get_driver() current_step = step_log.step_number if driver is not None: for step_logs in agent.logs: # Supprimer les captures d'écran précédentes des logs pour un traitement allégé if isinstance(step_log, ActionStep) and step_log.step_number <= current_step - 2: step_logs.observations_images = None png_bytes = driver.get_screenshot_as_png() image = Image.open(BytesIO(png_bytes)) print(f"Capture d'écran de navigateur capturée : {image.size} pixels") step_log.observations_images = [image.copy()] # Créer une copie pour s'assurer qu'elle persiste, important ! # Mettre à jour les observations avec l'URL actuelle url_info = f"URL actuelle : {driver.current_url}" step_log.observations = url_info if step_logs.observations is None else step_log.observations + "\n" + url_info return ``` Cette fonction est passée à l'agent comme `step_callback`, car elle est déclenchée à la fin de chaque étape pendant l'exécution de l'agent. Cela permet à l'agent de capturer et stocker dynamiquement des captures d'écran tout au long de son processus. Maintenant, nous pouvons générer notre agent de vision pour naviguer sur le web, en lui fournissant les outils que nous avons créés, avec le `DuckDuckGoSearchTool` pour explorer le web. Cet outil aidera l'agent à récupérer les informations nécessaires pour vérifier les identités des invités basées sur des indices visuels. 
```python from smolagents import CodeAgent, OpenAIServerModel, DuckDuckGoSearchTool model = OpenAIServerModel(model_id="gpt-4o") agent = CodeAgent( tools=[DuckDuckGoSearchTool(), go_back, close_popups, search_item_ctrl_f], model=model, additional_authorized_imports=["helium"], step_callbacks=[save_screenshot], max_steps=20, verbosity_level=2, ) ``` Avec cela, Alfred est prêt à vérifier les identités des invités et prendre des décisions éclairées sur s'il faut les laisser entrer ou non à la fête : ```python agent.run(""" Je suis Alfred, le majordome du manoir Wayne, chargé de vérifier l'identité des invités à une fête. Une super-héroïne se présente à l'entrée en prétendant être Wonder Woman, mais je dois vérifier si elle est bien celle qu'elle prétend être. Veuillez rechercher des images de Wonder Woman et générer une description visuelle détaillée à partir de ces images. De plus, naviguez sur Wikipédia pour recueillir des détails clés sur son apparence. Grâce à ces informations, je pourrai déterminer s'il convient de lui accorder l'accès à l'événement. """ + helium_instructions) ``` Vous pouvez voir que nous incluons `helium_instructions` dans le cadre de la tâche. Ce *prompt* spécial vise à contrôler la navigation de l'agent, s'assurant qu'il suit les bonnes étapes lors de la navigation web. Voyons comment cela fonctionne dans la vidéo ci-dessous : <iframe width="560" height="315" src="https://www.youtube.com/embed/rObJel7-OLc?si=TnNwQ8rqXqun_pqE" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe> C'est la sortie finale : ```python Réponse finale : Wonder Woman est typiquement représentée portant un bustier rouge et or, un short ou une jupe bleu avec des étoiles blanches, un tiare doré, des bracelets argentés et un lasso de vérité doré. 
Elle est la Princesse Diana de Themyscira, connue sous le nom de Diana Prince dans le monde des hommes. ``` Avec tout cela, nous avons créé avec succès notre vérificateur d'identité pour la fête ! Alfred a maintenant les outils nécessaires pour s'assurer que seuls les bons invités franchissent la porte. Tout est prêt pour passer du bon temps au manoir Wayne ! ## Lectures complémentaires - [Nous venons de donner la vue à smolagents](https://huggingface.co/blog/smolagents-can-see) - Blog décrivant la fonctionnalité d'agent visuel. - [Automatisation de navigateur web avec agents 🤖🌐](https://huggingface.co/docs/smolagents/examples/web_browser) - Exemple pour la navigation web utilisant un agent visuel. - [Exemple d'agent visuel pour navigateur web](https://github.com/huggingface/smolagents/blob/main/src/smolagents/vision_web_browser.py) - Exemple pour la navigation web utilisant un agent visuel.
agents-course/units/fr/unit2/smolagents/vision_agents.mdx/0
{ "file_path": "agents-course/units/fr/unit2/smolagents/vision_agents.mdx", "repo_id": "agents-course", "token_count": 5099 }
11
# (선택 섹션) Discord 101 [[discord-101]]

<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/discord-etiquette.jpg" alt="The Discord Etiquette" width="100%"/>

이 가이드는 게임 및 머신러닝(Machine Learning) 커뮤니티에서 인기 있는 무료 채팅 플랫폼, 디스코드(Discord)를 처음 사용하는 분들을 위한 안내서입니다.

Hugging Face 커뮤니티 Discord 서버에서는 **10만 명 이상**의 멤버가 활동하고 있습니다. 아래 <a href="https://discord.gg/UrrTSsSyjb" target="_blank">링크</a>를 클릭하여 참여해 보세요! 다른 사용자들과 만날 수 있는 좋은 장소입니다!

## Hugging Face 디스코드 채널에서 Agent 코스를 만나보세요!

디스코드를 처음 이용하신다면 다소 낯설 수 있어, 퀵가이드를 준비했습니다.

<!-- Not the case anymore, you'll be prompted to choose your interests. Be sure to select **"AI Agents"** to gain access to the AI Agents Category, which includes all the course-related channels. Feel free to explore and join additional channels if you wish! 🚀-->

Hugging Face 커뮤니티 서버는 논문 토론, 이벤트 등 다양한 분야에서 활발한 커뮤니티입니다. 먼저 [회원가입](http://hf.co/join/discord) 하신 후, `#introduce-yourself` 채널에서 간단한 자기소개를 남겨주세요!

이 서버에는 에이전트 코스 전용 4가지 채널이 있습니다:

- `agents-course-announcements`: **최신 코스 소식**을 확인하는 공간
- `🎓-agents-course-general`: **자유로운 대화와 토론** 을 위한 공간
- `agents-course-questions`: **질문 & 동료들과 도움 주고 받기** 위한 공간
- `agents-course-showcase`: **자신이 만든 최고의 AI 에이전트를 공유** 하기 위한 공간

추가로 :

- `smolagents`: **라이브러리에 대한 논의 및 지원** 을 받을 수 있습니다.

## 디스코드 활용 팁

### 서버에 참여하는 방법

Discord가 익숙하지 않다면, 서버 참여 방법에 대한 <a href="https://support.discord.com/hc/en-us/articles/360034842871-How-do-I-join-a-Server#h_01FSJF9GT2QJMS2PRAW36WNBS8" target="_blank">공식 가이드</a>를 참고하세요!

간단한 절차는 다음과 같습니다 :

1. <a href="https://discord.gg/UrrTSsSyjb" target="_blank">초대 링크</a>를 클릭합니다.
2. 디스코드 계정으로 로그인 하거나, 새 계정을 만듭니다.
3. 본인이 AI 에이전트가 아님을 인증하세요!
4. 별명과 아바타를 설정합니다.
5. "서버 참여(Join Server)"를 클릭합니다!

### 디스코드 효과적으로 활용하기

디스코드를 효과적으로 활용할 수 있는 몇 가지 팁!

- **음성 채널** 도 제공되지만, 일반적으로는 텍스트 채팅이 더 많이 사용됩니다.
- **마크다운 형식**을 사용할 수 있어, 코드 작성시 유용합니다. 링크에는 마크다운 사용이 제한될 수 있습니다!
- **긴 대화**시 스레드를 활용하시면 더 편리합니다.
이 가이드가 도움이 되셨기를 바랍니다! 질문이 있으면 디스코드에서 언제든 문의해주세요. 🤗
agents-course/units/ko/unit0/discord101.mdx/0
{ "file_path": "agents-course/units/ko/unit0/discord101.mdx", "repo_id": "agents-course", "token_count": 2269 }
12
# smolagents로 첫 번째 에이전트 만들기 [[lets-create-our-first-agent-using-smolagents]] 앞 섹션에서 우리는 Python 코드로 에이전트를 처음부터 만드는 방법을 배웠고, **이 과정이 얼마나 번거로울 수 있는지** 직접 확인했습니다. 다행히도 많은 에이전트 라이브러리들이 **복잡한 작업들을 자동화하여** 이 과정을 훨씬 간단하게 만들어줍니다. 이 튜토리얼에서는 **여러분의 첫 번째 에이전트를 만들게 됩니다**. 이 에이전트는 이미지 생성, 웹 검색, 시간대 확인 등 다양한 작업을 수행할 수 있습니다! 또한 여러분이 만든 에이전트를 **Hugging Face Space에 올려서 친구들이나 동료들과 공유**할 수도 있습니다. 시작해 볼까요! ## smolagents란 무엇인가요? [[what-is-smolagents]] <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/smolagents.png" alt="smolagents"/> 이 에이전트를 만들기 위해, 우리는 **에이전트 개발을 쉽게 해주는 프레임워크인** `smolagents` 라이브러리를 사용할 것입니다. 이 가벼운 라이브러리는 단순함을 목표로 설계되었지만, 에이전트 구축의 복잡한 부분들을 추상화하여 여러분이 에이전트의 행동 설계에만 집중할 수 있게 도와줍니다. 다음 Unit에서 smolagents에 대해 더 자세히 알아볼 예정입니다. 그동안 이 <a href="https://huggingface.co/blog/smolagents" target="_blank">블로그 포스트</a>나 라이브러리의 <a href="https://github.com/huggingface/smolagents" target="_blank">GitHub 저장소</a>를 확인해보세요. 간단히 말해, `smolagents`는 **codeAgent**에 초점을 맞춘 라이브러리입니다. 이런 유형의 에이전트는 코드 블록을 통해 **"행동(Actions)"**을 수행한 다음, 코드를 실행하여 결과를 **"관찰(Observes)"**합니다. 다음은 우리가 만들 에이전트의 예시입니다! 우리가 에이전트에 **이미지 생성 도구**를 제공하고 고양이 이미지를 생성해달라고 요청했습니다. `smolagents` 내의 에이전트는 **이전에 우리가 직접 만든 에이전트와 동일한 방식으로 작동합니다**: 최종 답변에 도달할 때까지 **생각하고, 행동하고, 관찰하는 사이클을 반복**합니다: <iframe width="560" height="315" src="https://www.youtube.com/embed/PQDKcWiuln4?si=ysSTDZoi8y55FVvA" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe> 흥미롭지 않나요? ## 에이전트 만들기! [[lets-build-our-agent]] 시작하려면 이 Space를 복제하세요: <a href="https://huggingface.co/spaces/agents-course/First_agent_template" target="_blank">https://huggingface.co/spaces/agents-course/First_agent_template</a> > 이 템플릿을 만들어준 <a href="https://huggingface.co/m-ric" target="_blank">Aymeric</a>에게 감사드립니다! 
🙌 Space를 복제한다는 것은 **자신의 프로필에 개인 사본을 만드는 것**을 의미합니다: <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/duplicate-space.gif" alt="Duplicate"/> 이 강의를 통틀어 여러분이 수정해야 할 파일은 (현재 미완성 상태인) **"app.py"** 하나뿐입니다. 여기서 [템플릿의 원본 파일](https://huggingface.co/spaces/agents-course/First_agent_template/blob/main/app.py)을 확인할 수 있습니다. 여러분의 파일을 찾으려면, 복제한 Space로 이동한 다음 `Files` 탭을 클릭하고 디렉토리 목록에서 `app.py`를 클릭하세요. 코드를 함께 살펴봅시다: - 파일은 몇 가지 필요한 라이브러리 불러오기로 시작합니다 ```python from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel, load_tool, tool import datetime import requests import pytz import yaml from tools.final_answer import FinalAnswerTool ``` 앞서 설명했듯이, **smolagents**에서 직접 **CodeAgent** 클래스를 사용할 것입니다. ### 도구 [[the-tools]] 이제 도구에 대해 알아봅시다! 도구에 관한 내용을 다시 복습하고 싶다면, 강의의 [도구](tools) 섹션을 참고하세요. ```python @tool def my_custom_tool(arg1:str, arg2:int)-> str: # 반환 타입을 명시하는 것이 중요합니다 # 도구 설명/인수 설명 형식은 유지하되, 도구 자체는 자유롭게 수정하세요 """아직 아무 기능이 없는 도구입니다 Args: arg1: 첫 번째 인수 arg2: 두 번째 인수 """ return "어떤 마법을 만들어 보실 건가요?" @tool def get_current_time_in_timezone(timezone: str) -> str: """특정 시간대의 현재 시간을 알려주는 도구입니다. Args: timezone: 유효한 시간대를 나타내는 문자열(예: 'America/New_York'). """ try: # 시간대 객체 생성 tz = pytz.timezone(timezone) # 해당 시간대의 현재 시간 가져오기 local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S") return f"{timezone}의 현재 시간은 {local_time}입니다" except Exception as e: return f"'{timezone}' 시간대 정보를 가져오는 중 오류 발생: {str(e)}" ``` 도구는 이 섹션에서 여러분이 직접 만들어볼 것입니다! 두 가지 예시를 제공해드립니다: 1. 실제로는 아무것도 하지 않는 **더미 도구** - 이것을 유용한 기능으로 수정해보세요. 2. 전 세계 어디서든 현재 시간을 알려주는 **실제 작동하는 도구**. 도구를 정의할 때 중요한 점: 1. `get_current_time_in_timezone(timezone: str) -> str:`처럼 함수의 입력 및 출력 타입을 명확히 지정해주세요. 2. **잘 작성된 문서 문자열(docstring)**을 포함하세요. `smolagents`는 모든 인수에 대해 **docstring에 설명이 있어야** 합니다. ### 에이전트 [[the-agent]] 이 에이전트는 LLM 엔진으로 [`Qwen/Qwen2.5-Coder-32B-Instruct`](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct)를 사용합니다. 이는 서버리스 API를 통해 접근할 수 있는 매우 강력한 모델입니다. 
```python final_answer = FinalAnswerTool() model = InferenceClientModel( max_tokens=2096, temperature=0.5, model_id='Qwen/Qwen2.5-Coder-32B-Instruct', custom_role_conversions=None, ) with open("prompts.yaml", 'r') as stream: prompt_templates = yaml.safe_load(stream) # CodeAgent 생성 agent = CodeAgent( model=model, tools=[final_answer], # 여기에 도구들을 추가하세요 (final_answer는 제거하지 마세요) max_steps=6, verbosity_level=1, grammar=None, planning_interval=None, name=None, description=None, prompt_templates=prompt_templates ) GradioUI(agent).launch() ``` 이 에이전트는 이전 섹션에서 살펴본 `InferenceClient`를 **InferenceClientModel** 클래스 내부에서 사용하고 있습니다! Unit 2에서 이 프레임워크에 대해 더 자세한 예시를 제공할 예정입니다. 지금은 에이전트의 `tools` 매개변수를 사용해 **도구 목록에 새로운 도구를 추가**하는 데 집중하세요. 예를 들어, 코드 첫 줄에서 불러온 `DuckDuckGoSearchTool`을 사용하거나, 코드 뒷부분에서 Hub에서 불러오는 `image_generation_tool`을 활용해볼 수 있습니다. **도구를 추가하면 에이전트에 새로운 능력이 생깁니다**. 창의성을 발휘해 보세요! 완성된 "app.py" 코드: ```python from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel, load_tool, tool import datetime import requests import pytz import yaml from tools.final_answer import FinalAnswerTool from Gradio_UI import GradioUI # 아래는 아무 기능이 없는 도구의 예시입니다. 여러분의 창의력으로 멋진 것을 만들어보세요! @tool def my_custom_tool(arg1:str, arg2:int)-> str: # 반환 타입을 명시하는 것이 중요합니다 # 도구 설명/인수 설명 형식은 유지하되, 도구 자체는 자유롭게 수정하세요 """아직 아무 기능이 없는 도구입니다 Args: arg1: 첫 번째 인수 arg2: 두 번째 인수 """ return "어떤 마법을 만들어 보실 건가요?" @tool def get_current_time_in_timezone(timezone: str) -> str: """특정 시간대의 현재 시간을 알려주는 도구입니다. Args: timezone: 유효한 시간대를 나타내는 문자열(예: 'America/New_York'). 
""" try: # 시간대 객체 생성 tz = pytz.timezone(timezone) # 해당 시간대의 현재 시간 가져오기 local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S") return f"{timezone}의 현재 시간은 {local_time}입니다" except Exception as e: return f"'{timezone}' 시간대 정보를 가져오는 중 오류 발생: {str(e)}" final_answer = FinalAnswerTool() model = InferenceClientModel( max_tokens=2096, temperature=0.5, model_id='Qwen/Qwen2.5-Coder-32B-Instruct', custom_role_conversions=None, ) # Hub에서 도구 불러오기 image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True) with open("prompts.yaml", 'r') as stream: prompt_templates = yaml.safe_load(stream) agent = CodeAgent( model=model, tools=[final_answer], # 여기에 도구들을 추가하세요 (final_answer는 제거하지 마세요) max_steps=6, verbosity_level=1, grammar=None, planning_interval=None, name=None, description=None, prompt_templates=prompt_templates ) GradioUI(agent).launch() ``` 여러분의 **목표**는 Space와 에이전트에 친숙해지는 것입니다. 현재 템플릿의 에이전트는 **아무런 도구도 사용하지 않고 있습니다. 미리 만들어진 도구들을 추가하거나 직접 새로운 도구를 만들어보세요!** 디스코드 채널 **#agents-course-showcase**에서 여러분이 만든 멋진 에이전트 결과물을 기다리고 있습니다! --- 축하합니다! 첫 번째 에이전트를 만드셨네요! 친구나 동료들과 자유롭게 공유해보세요. 첫 시도이니만큼 약간의 버그가 있거나 속도가 느릴 수 있는 건 매우 자연스러운 일입니다. 앞으로의 단원에서는 더 나은 에이전트를 만드는 방법을 배울 예정입니다. 가장 좋은 학습 방법은 직접 시도해보는 것입니다. 에이전트를 업데이트하거나, 더 많은 도구를 추가하거나, 다른 모델을 시험해보는 것을 망설이지 마세요. 다음 섹션에서는 최종 퀴즈를 풀고 수료증을 받게 됩니다!
agents-course/units/ko/unit1/tutorial.mdx/0
{ "file_path": "agents-course/units/ko/unit1/tutorial.mdx", "repo_id": "agents-course", "token_count": 7468 }
13
# Заключение [[conclusion]] Поздравляем с завершением этого первого раздела 🥳. Вы только что **овладели основами работы агентов** и создали своего первого AI Агента! Это **нормально, если вы все еще чувствуете себя сбитым с толку некоторыми из этих элементов**. Агенты - сложная тема, и обычно требуется время, чтобы понять все. **Потратьте время, чтобы действительно понять материал**, прежде чем продолжать. Важно освоить эти элементы и заложить прочный фундамент, прежде чем приступать к самой интересной части. А если вы пройдете тест, не забудьте получить сертификат 🎓 👉 [здесь](https://huggingface.co/spaces/agents-course/unit1-certification-app) <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/certificate-example.jpg" alt="Пример Сертификата"/> В следующем (бонусном) разделе вы научитесь **дообучать Agent вызову функций (а также вызову инструментов по запросу пользователя)**. Наконец, мы хотели бы **услышать, что вы думаете о курсе и как мы можем его улучшить**. Если у вас есть отзыв, пожалуйста, 👉 [заполните эту форму](https://docs.google.com/forms/d/e/1FAIpQLSe9VaONn0eglax0uTwi29rIn4tM7H2sYmmybmG5jJNlE5v0xA/viewform?usp=dialog) ### Продолжайте учиться, оставайтесь потрясающими 🤗
agents-course/units/ru-RU/unit1/conclusion.mdx/0
{ "file_path": "agents-course/units/ru-RU/unit1/conclusion.mdx", "repo_id": "agents-course", "token_count": 1128 }
14
# Hãy fine-Tune model của bạn cho chức năng function-calling Chúng ta đã sẵn sàng để fine-tune (tinh chỉnh) model đầu tiên cho function-calling rồi đây 🔥. ## Làm thế nào để training model cho function-calling? > Câu trả lời: Ta cần **data** Quá trình training model có thể chia thành 3 bước: 1. **Model được pretrain trên lượng data khổng lồ**. Kết quả của bước này là **pretrained model**. Ví dụ: [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b). Đây là model nền tảng và chỉ biết **dự đoán token tiếp theo mà không có khả năng tuân theo chỉ dẫn**. 2. Để hữu ích trong bối cảnh chat, model cần được **fine-tune** để tuân theo hướng dẫn. Ở bước này, quá trình training có thể được thực hiện bởi nhà phát triển model, cộng đồng mã nguồn mở, bạn hay bất kỳ ai. Ví dụ: [google/gemma-2-2b-it](https://huggingface.co/google/gemma-2-2b-it) là model đã được fine-tune để tuân theo chỉ dẫn bởi đội ngũ Google của dự án Gemma. 3. Model sau đó có thể được **alignment** (cân chỉnh) theo mong muốn của người tạo. Ví dụ: model chat hỗ trợ khách hàng không bao giờ được bất lịch sự. Thông thường các sản phẩm hoàn chỉnh như Gemini hay Mistral **sẽ trải qua cả 3 bước**, trong khi các model bạn tìm thấy trên Hugging Face đã hoàn thành một hoặc nhiều bước training. Trong hướng dẫn này, chúng ta sẽ xây dựng model function-calling dựa trên [google/gemma-2-2b-it](https://huggingface.co/google/gemma-2-2b-it). Ta chọn model đã fine-tune [google/gemma-2-2b-it](https://huggingface.co/google/gemma-2-2b-it) thay vì model nền tảng [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b) vì model đã fine-tune đã được cải thiện cho use-case của ta. Nếu bắt đầu từ pretrained model **sẽ cần training nhiều hơn để học cách tuân theo chỉ dẫn, chat VÀ function-calling**. Bằng cách bắt đầu từ model đã fine-tune để tuân theo chỉ dẫn, **ta giảm thiểu lượng thông tin model cần học**. 
## LoRA (Low-Rank Adaptation of Large Language Models) LoRA là kỹ thuật training nhẹ và phổ biến giúp **giảm đáng kể số parameters cần training**. Nó hoạt động bằng cách **chèn một lượng nhỏ weights mới vào model như adapter để training**. Điều này giúp training với LoRA nhanh hơn, tiết kiệm bộ nhớ hơn, và tạo ra weights model nhỏ hơn (vài trăm MB), dễ lưu trữ và chia sẻ. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/blog_multi-lora-serving_LoRA.gif" alt="LoRA inference" width="50%"/> LoRA hoạt động bằng cách thêm các cặp ma trận phân tách hạng vào các lớp Transformer, thường tập trung vào các lớp tuyến tính. Trong quá trình training, ta sẽ "đóng băng" phần còn lại của model và chỉ cập nhật weights của các adapter mới này. Nhờ vậy, số **parameters** cần training giảm đáng kể vì ta chỉ cần cập nhật weights của adapter. Trong quá trình inference, đầu vào sẽ đi qua adapter và model nền tảng, hoặc các weights adapter có thể được hợp nhất với model nền tảng mà không gây thêm độ trễ. LoRA đặc biệt hữu ích để điều chỉnh các mô hình ngôn ngữ **lớn** cho các tác vụ hoặc lĩnh vực cụ thể trong khi vẫn quản lý được yêu cầu tài nguyên. Điều này giúp giảm bộ nhớ **required** để training model. Nếu muốn tìm hiểu thêm về cách hoạt động của LoRA, hãy xem [hướng dẫn này](https://huggingface.co/learn/nlp-course/chapter11/4?fw=pt). ## Fine-Tuning (tinh chỉnh) Model cho Function-Calling Bạn có thể truy cập notebook hướng dẫn tại đây 👉 [đây](https://huggingface.co/agents-course/notebooks/blob/main/bonus-unit1/bonus-unit1.ipynb). Sau đó, click vào [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/#fileId=https://huggingface.co/agents-course/notebooks/blob/main/bonus-unit1/bonus-unit1.ipynb) để chạy notebook trên Colab.
agents-course/units/vi/bonus-unit1/fine-tuning.mdx/0
{ "file_path": "agents-course/units/vi/bonus-unit1/fine-tuning.mdx", "repo_id": "agents-course", "token_count": 2479 }
15
--- ### Q1: Agent (tác nhân) là gì? Lựa chọn nào sau đây mô tả đúng nhất về AI agent? <Question choices={[ { text: "Hệ thống chỉ xử lý văn bản tĩnh, không có cơ chế tương tác động với môi trường xung quanh hay thực hiện hành động có ý nghĩa.", explain: "Agent phải có khả năng thực hiện hành động và tương tác với môi trường.", }, { text: "Mô hình AI có thể lập luận, lên kế hoạch và sử dụng công cụ để tương tác với môi trường nhằm đạt mục tiêu cụ thể.", explain: "Định nghĩa này nắm bắt được đặc tính cốt lõi của Agent.", correct: true }, { text: "Trợ lý ảo chỉ trả lời câu hỏi, không có khả năng thực hiện hành động hay tương tác với hệ thống bên ngoài.", explain: "Chatbot như vậy thiếu khả năng hành động, khác biệt so với Agent.", }, { text: "Kho thông tin trực tuyến cung cấp nội dung tĩnh, không có khả năng thực thi tác vụ hay tương tác chủ động với người dùng.", explain: "Agent tương tác chủ động với môi trường thay vì chỉ cung cấp thông tin tĩnh.", } ]} /> --- ### Q2: Vai trò của Hoạch định trong Agent Tại sao Agent cần lập kế hoạch trước khi hành động? <Question choices={[ { text: "Để lưu trữ/ghi nhớ các tương tác trong quá khứ thay vì vạch ra chuỗi hành động tương lai.", explain: "Hoạch định liên quan đến xác định hành động tương lai, không phải lưu trữ quá khứ.", }, { text: "Để quyết định trình tự hành động và chọn công cụ phù hợp nhằm đáp ứng yêu cầu của người dùng.", explain: "Hoạch định giúp Agent xác định các bước và công cụ tối ưu để hoàn thành tác vụ.", correct: true }, { text: "Để thực thi chuỗi hành động ngẫu nhiên, thiếu chiến lược hoặc mục tiêu rõ ràng.", explain: "Hoạch định đảm bảo hành động của Agent có chủ đích, không phải ngẫu nhiên.", }, { text: "Chỉ để chuyển đổi văn bản, bỏ qua quá trình xây dựng trình tự hành động có chủ đích hay lập luận chiến lược.", explain: "Hoạch định liên quan đến cấu trúc hành động, không chỉ chuyển đổi văn bản.", } ]} /> --- ### Q3: Công cụ (Tools) nâng cao khả năng của Agent như thế nào? 
Tại sao Tools lại quan trọng với Agent? <Question choices={[ { text: "Công cụ không có tác dụng thực sự và không giúp Agent thực hiện hành động vượt qua việc tạo văn bản cơ bản.", explain: "Công cụ mở rộng khả năng của Agent bằng cách cho phép thực hiện hành động vượt xa tạo văn bản.", }, { text: "Công cụ cho phép Agent thực hiện các hành động mà mô hình tạo văn bản không làm được, như pha cà phê hay tạo ảnh.", explain: "Công cụ giúp Agent tương tác với thế giới thực và hoàn thành tác vụ.", correct: true }, { text: "Công cụ chỉ dùng để lưu trữ bộ nhớ, không có khả năng hỗ trợ thực thi tác vụ hay nâng cao hiệu suất tương tác.", explain: "Công cụ chủ yếu dùng để thực hiện hành động, không chỉ lưu trữ dữ liệu.", }, { text: "Công cụ hạn chế Agent chỉ ở việc tạo văn bản, ngăn không cho thực hiện các hành động tương tác đa dạng hơn.", explain: "Ngược lại, công cụ cho phép Agent vượt xa phản hồi dạng văn bản.", } ]} /> --- ### Q4: Hành động (Action) khác Công cụ (Tools) như thế nào? Điểm khác biệt chính giữa Action và Tools là gì? <Question choices={[ { text: "Hành động là các bước Agent thực hiện, còn Công cụ là tài nguyên bên ngoài Agent có thể sử dụng.", explain: "Hành động là mục tiêu cấp cao, còn Công cụ là chức năng cụ thể Agent gọi đến.", correct: true }, { text: "Hành động và Công cụ hoàn toàn giống nhau, có thể thay thế cho nhau.", explain: "Không, Hành động là mục tiêu/tác vụ, còn Công cụ là tiện ích cụ thể Agent dùng để đạt mục tiêu.", }, { text: "Công cụ được coi là tiện ích đa năng, còn Hành động bị hiểu nhầm là chỉ liên quan tương tác vật lý.", explain: "Không hẳn. Hành động có thể bao gồm cả tác vụ số và vật lý.", }, { text: "Hành động bắt buộc phải dùng LLM để xác định và thực thi, còn Công cụ hoạt động độc lập không phụ thuộc.", explain: "Dù LLM giúp quyết định Hành động, bản thân Hành động không phụ thuộc vào LLM.", } ]} /> --- ### Q5: Vai trò của Mô hình ngôn ngữ lớn (LLM) trong Agent LLM đóng góp thế nào vào chức năng của Agent? 
<Question choices={[ { text: "LLM chỉ là kho lưu trữ thụ động, không có khả năng xử lý đầu vào hay tạo phản hồi động.", explain: "LLM xử lý đầu vào văn bản và tạo phản hồi một cách chủ động.", }, { text: "LLM đóng vai trò 'bộ não' lập luận của Agent, xử lý đầu vào văn bản để hiểu chỉ dẫn và lập kế hoạch hành động.", explain: "LLM giúp Agent diễn giải, lập kế hoạch và quyết định các bước tiếp theo.", correct: true }, { text: "LLM bị hiểu nhầm là chỉ xử lý ảnh, trong khi chức năng chính là xử lý và tạo văn bản.", explain: "LLM chủ yếu làm việc với văn bản, dù đôi khi có thể xử lý đa phương thức.", }, { text: "LLM hoàn toàn không liên quan đến hoạt động của AI agent, là thành phần thừa trong ứng dụng thực tế.", explain: "LLM là thành phần cốt lõi của AI agent hiện đại.", } ]} /> --- ### Q6: Ví dụ thực tế nào minh họa rõ nhất AI agent? Ví dụ nào trong thực tế thể hiện rõ nhất AI agent đang hoạt động? <Question choices={[ { text: "Trang FAQ tĩnh trên website chỉ cung cấp thông tin cố định, không có khả năng phản hồi tương tác.", explain: "Trang FAQ tĩnh không tương tác động hay thực hiện hành động.", }, { text: "Trợ lý ảo như Siri/Alexa có thể hiểu lệnh thoại, lập luận và thực hiện tác vụ như đặt nhắc nhở hay gửi tin nhắn.", explain: "Ví dụ này bao gồm lập luận, hoạch định và tương tác với môi trường.", correct: true }, { text: "Máy tính đơn giản thực hiện phép toán dựa trên quy tắc cố định, không có khả năng lập luận hay hoạch định.", explain: "Máy tính tuân theo quy tắc cố định mà không cần lập luận, nên không phải Agent.", }, { text: "NPC trong game hoạt động theo kịch bản cố định, không thể lập luận, lập kế hoạch hay dùng công cụ bên ngoài.", explain: "NPC không thể lập luận, hoạch định hay dùng công cụ thì không phải AI agent.", } ]} /> --- Chúc mừng bạn đã hoàn thành Kiểm tra nhanh 🥳! Nếu cần ôn lại nội dung nào, hãy xem lại chương này để củng cố kiến thức trước khi đi sâu vào "bộ não" của Agent: LLM.
agents-course/units/vi/unit1/quiz1.mdx/0
{ "file_path": "agents-course/units/vi/unit1/quiz1.mdx", "repo_id": "agents-course", "token_count": 4826 }
16
<CourseFloatingBanner chapter={2} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/#fileId=https://huggingface.co/agents-course/notebooks/blob/main/bonus-unit2/monitoring-and-evaluating-agents.ipynb"}, ]} /> # 附加单元 2:AI 智能体(AI Agent)的可观测性与评估 <Tip> 你可以跟随<a href="https://huggingface.co/agents-course/notebooks/blob/main/bonus-unit2/monitoring-and-evaluating-agents-notebook.ipynb" target="_blank">这个 notebook</a> 中的代码进行操作,你可以在 Google Colab 上运行它。 </Tip> 在这个 notebook 中,我们将学习如何使用开源可观测性工具来**监督我们 AI 智能体的内部步骤(追踪)**并**评估其性能**。 观测和评估智能体行为的能力对于以下方面至关重要: - 当任务失败或产生次优结果时调试问题 - 实时跟踪成本和性能 - 通过持续反馈提高可靠性和安全性 ## 练习先决条件 🏗️ 在运行此 notebook 之前,请确保你已经: 🔲 📚 **学习了** [智能体简介](https://huggingface.co/learn/agents-course/unit1/introduction) 🔲 📚 **学习了** [smolagents 框架](https://huggingface.co/learn/agents-course/unit2/smolagents/introduction) ## 步骤 0:安装所需的库 我们将需要一些库,以便我们能够运行、监控和评估我们的智能体: ```python %pip install 'smolagents[telemetry]' %pip install opentelemetry-sdk opentelemetry-exporter-otlp openinference-instrumentation-smolagents %pip install langfuse datasets 'smolagents[gradio]' ``` ## 步骤 1:检测你的智能体 在这个 notebook 中,我们将使用 [Langfuse](https://langfuse.com/) 作为我们的可观测性工具,但你可以使用**任何其他兼容 OpenTelemetry 的服务**。下面的代码展示了如何为 Langfuse(或任何 OTel 端点)设置环境变量,以及如何检测你的 smolagent。 **请注意:** 如果你正在使用 LlamaIndex 或 LangGraph,你可以在[这里](https://langfuse.com/docs/integrations/llama-index/workflows)和[这里](https://langfuse.com/docs/integrations/langchain/example-python-langgraph)找到检测它们的文档。 首先,让我们配置正确的环境变量,以设置到 Langfuse OpenTelemetry 端点的连接。 ```python import os import base64 # 从 https://cloud.langfuse.com 获取你自己的密钥 LANGFUSE_PUBLIC_KEY = "pk-lf-..." LANGFUSE_SECRET_KEY = "sk-lf-..." 
os.environ["LANGFUSE_PUBLIC_KEY"] = LANGFUSE_PUBLIC_KEY os.environ["LANGFUSE_SECRET_KEY"] = LANGFUSE_SECRET_KEY os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com" # 🇪🇺 欧盟区域示例 # os.environ["LANGFUSE_HOST"] = "https://us.cloud.langfuse.com" # 🇺🇸 美国区域示例 LANGFUSE_AUTH = base64.b64encode( f"{LANGFUSE_PUBLIC_KEY}:{LANGFUSE_SECRET_KEY}".encode() ).decode() os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = os.environ.get("LANGFUSE_HOST") + "/api/public/otel" os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = f"Authorization=Basic {LANGFUSE_AUTH}" ``` 我们还需要配置我们的 Hugging Face token 用于推理调用。 ```python # 将你的 Hugging Face 和其他 token或者密钥设置为环境变量 os.environ["HF_TOKEN"] = "hf_..." ``` 接下来,我们可以为我们配置的 OpenTelemetry 设置一个 tracer-provider。 ```python from opentelemetry.sdk.trace import TracerProvider from openinference.instrumentation.smolagents import SmolagentsInstrumentor from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter from opentelemetry.sdk.trace.export import SimpleSpanProcessor # 为 OpenTelemetry 创建一个 TracerProvider trace_provider = TracerProvider() # 添加一个带有 OTLPSpanExporter 的 SimpleSpanProcessor 来发送追踪 trace_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter())) # 设置全局默认 tracer provider from opentelemetry import trace trace.set_tracer_provider(trace_provider) tracer = trace.get_tracer(__name__) # 使用配置的 provider 检测 smolagents SmolagentsInstrumentor().instrument(tracer_provider=trace_provider) ``` ## 步骤 2:测试你的检测 这里有一个来自 smolagents 的简单 CodeAgent,用于计算 `1+1`。我们运行它来确认检测是否正常工作。如果一切设置正确,你将在你的可观测性仪表板中看到日志/跨度(spans)。 ```python from smolagents import InferenceClientModel, CodeAgent # 创建一个简单的智能体来测试检测 agent = CodeAgent( tools=[], model=InferenceClientModel() ) agent.run("1+1=") ``` 检查你的 [Langfuse Traces Dashboard](https://cloud.langfuse.com)(或你选择的可观测性工具)以确认跨度(spans)和日志已被记录。 Langfuse 中的示例截图: ![Example trace in Langfuse](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit2/first-example-trace.png) 
_[追踪链接](https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/1b94d6888258e0998329cdb72a371155?timestamp=2025-03-10T11%3A59%3A41.743Z)_ ## 步骤 3:观测和评估更复杂的AI智能体 既然你已经确认你的检测工作正常,让我们尝试一个更复杂的查询,这样我们就可以看到高级指标(token 使用量、延迟、成本等)是如何被追踪的。 ```python from smolagents import (CodeAgent, DuckDuckGoSearchTool, InferenceClientModel) search_tool = DuckDuckGoSearchTool() agent = CodeAgent(tools=[search_tool], model=InferenceClientModel()) agent.run("How many Rubik's Cubes could you fit inside the Notre Dame Cathedral?") ``` ### 追踪结构 大多数可观测性工具会记录一个**追踪(trace)**,其中包含**跨度(spans)**,每个跨度代表你的智能体逻辑的一个步骤。在这里,追踪包含了整体的智能体运行以及用于以下内容的子跨度: - 工具调用 (DuckDuckGoSearchTool) - LLM 调用 (InferenceClientModel) 你可以检查这些跨度,以精确地了解时间花在哪里、使用了多少 token 等等: Langfuse 中的追踪树: ![Trace tree in Langfuse](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit2/trace-tree.png) _[前往追踪(trace)的链接](https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/1ac33b89ffd5e75d4265b62900c348ed?timestamp=2025-03-07T13%3A45%3A09.149Z&display=preview)_ ## 在线评估 在上一节中,我们了解了在线评估和离线评估的区别。现在,我们将了解如何在生产环境中监控你的智能体并实时评估它。 ### 生产环境中要追踪的常见指标 1. **成本** — smolagents 检测会捕获 token 使用量,你可以通过为每个 token 分配价格将其转换为近似成本。 2. **延迟** — 观察完成每个步骤或整个运行所需的时间。 3. **用户反馈** — 用户可以提供直接反馈(点赞/点踩)来帮助优化或纠正智能体。 4. **LLM 作为评判者** — 使用一个单独的 LLM 来近乎实时地评估你的智能体的输出(例如,检查毒性或正确性)。 下面,我们展示这些指标的示例。 #### 1. 成本 下面是一个显示 `Qwen2.5-Coder-32B-Instruct` 调用使用情况的截图。这对于查看成本高昂的步骤并优化你的智能体很有用。 ![Costs](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit2/smolagents-costs.png) _[前往追踪(trace)的链接](https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/1ac33b89ffd5e75d4265b62900c348ed?timestamp=2025-03-07T13%3A45%3A09.149Z&display=preview)_ #### 2. 
延迟 我们还可以看到完成每个步骤所需的时间。在下面的示例中,整个对话花费了 32 秒,你可以按步骤分解。这有助于你识别瓶颈并优化你的智能体。 ![Latency](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit2/smolagents-latency.png) _[前往追踪(trace)的链接](https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/1ac33b89ffd5e75d4265b62900c348ed?timestamp=2025-03-07T13%3A45%3A09.149Z&display=preview)_ #### 3. 附加属性 你还可以通过在跨度(spans)上设置附加属性——例如用户 ID、会话 ID 或标签。例如,smolagents 检测使用 OpenTelemetry 来附加诸如 `langfuse.user.id` 或自定义标签之类的属性。 ```python from smolagents import (CodeAgent, DuckDuckGoSearchTool, InferenceClientModel) from opentelemetry import trace search_tool = DuckDuckGoSearchTool() agent = CodeAgent( tools=[search_tool], model=InferenceClientModel() ) with tracer.start_as_current_span("Smolagent-Trace") as span: span.set_attribute("langfuse.user.id", "smolagent-user-123") span.set_attribute("langfuse.session.id", "smolagent-session-123456789") span.set_attribute("langfuse.tags", ["city-question", "testing-agents"]) agent.run("What is the capital of Germany?") ``` ![Enhancing agent runs with additional metrics](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit2/smolagents-attributes.png) #### 4. 
用户反馈 如果你的智能体嵌入到用户界面中,你可以记录直接的用户反馈(例如聊天界面中的点赞/点踩)。下面是使用 [Gradio](https://gradio.app/) 嵌入带有简单反馈机制的聊天示例。 在下面的代码片段中,当用户发送聊天消息时,我们捕获 OpenTelemetry 追踪 ID。如果用户喜欢/不喜欢上一个答案,我们将评分附加到该追踪上。 ```python import gradio as gr from opentelemetry.trace import format_trace_id from smolagents import (CodeAgent, InferenceClientModel) from langfuse import Langfuse langfuse = Langfuse() model = InferenceClientModel() agent = CodeAgent(tools=[], model=model, add_base_tools=True) formatted_trace_id = None # 为演示目的,我们将在全局存储当前的 trace_id def respond(prompt, history): with trace.get_tracer(__name__).start_as_current_span("Smolagent-Trace") as span: output = agent.run(prompt) current_span = trace.get_current_span() span_context = current_span.get_span_context() trace_id = span_context.trace_id global formatted_trace_id formatted_trace_id = str(format_trace_id(trace_id)) langfuse.trace(id=formatted_trace_id, input=prompt, output=output) history.append({"role": "assistant", "content": str(output)}) return history def handle_like(data: gr.LikeData): # 作为演示,我们将用户反馈映射为 1 (喜欢) 或 0 (不喜欢) if data.liked: langfuse.score( value=1, name="user-feedback", trace_id=formatted_trace_id ) else: langfuse.score( value=0, name="user-feedback", trace_id=formatted_trace_id ) with gr.Blocks() as demo: chatbot = gr.Chatbot(label="Chat", type="messages") prompt_box = gr.Textbox(placeholder="Type your message...", label="Your message") # 当用户在提示框上按 'Enter' 时,我们运行 'respond' prompt_box.submit( fn=respond, inputs=[prompt_box, chatbot], outputs=chatbot ) # 当用户点击消息上的 '喜欢' 按钮时,我们运行 'handle_like' chatbot.like(handle_like, None, None) demo.launch() ``` 然后,用户反馈会被捕获到你的可观测性工具中: ![User feedback is being captured in Langfuse](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit2/user-feedback-gradio.png) #### 5. LLM 作为评判者 LLM 作为评判者(LLM-as-a-Judge)是另一种自动评估你的智能体输出的方法。你可以设置一个单独的 LLM 调用来衡量输出的正确性、毒性、风格或你关心的任何其他标准。 **工作流程**: 1. 你定义一个**评估模板**,例如,“检查文本是否有毒。” 2. 每次你的智能体生成输出时,你将该输出连同模板一起传递给你的“评判者” LLM。 3. 
评判者 LLM 会返回一个评分或标签,你将其记录到你的可观测性工具中。 来自 Langfuse 的示例: ![LLM-as-a-Judge Evaluation Template](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit2/evaluator-template.png) ![LLM-as-a-Judge Evaluator](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit2/evaluator.png) ```python # 示例:检查智能体的输出是否有毒。 from smolagents import (CodeAgent, DuckDuckGoSearchTool, InferenceClientModel) search_tool = DuckDuckGoSearchTool() agent = CodeAgent(tools=[search_tool], model=InferenceClientModel()) agent.run("Can eating carrots improve your vision?") ``` 你可以看到这个例子的答案被判定为“无毒”。 ![LLM-as-a-Judge Evaluation Score](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit2/llm-as-a-judge-score.png) #### 6. 可观测性指标概览 所有这些指标都可以在仪表板中一起可视化。这使你能够快速查看你的智能体在多个会话中的表现,并帮助你随时间追踪质量指标。 ![Observability metrics overview](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit2/langfuse-dashboard.png) ## 离线评估 在线评估对于实时反馈至关重要,但你还需要**离线评估**——在开发之前或期间进行系统性检查。这有助于在将更改推送到生产环境之前维护质量和可靠性。 ### 数据集评估 在离线评估中,你通常: 1. 拥有一个基准数据集(包含提示和预期输出对) 2. 在该数据集上运行你的智能体 3. 
将输出与预期结果进行比较,或使用额外的评分机制 下面,我们使用 [GSM8K 数据集](https://huggingface.co/datasets/gsm8k) 来演示这种方法,该数据集包含数学问题和解决方案。 ```python import pandas as pd from datasets import load_dataset # 从 Hugging Face 获取 GSM8K dataset = load_dataset("openai/gsm8k", 'main', split='train') df = pd.DataFrame(dataset) print("GSM8K 数据集的前几行:") print(df.head()) ``` 接下来,我们在 Langfuse 中创建一个数据集实体来追踪运行。然后,我们将数据集中的每个项目添加到系统中。(如果你不使用 Langfuse,你可以简单地将这些存储在你自己的数据库或本地文件中进行分析。) ```python from langfuse import Langfuse langfuse = Langfuse() langfuse_dataset_name = "gsm8k_dataset_huggingface" # 在 Langfuse 中创建数据集 langfuse.create_dataset( name=langfuse_dataset_name, description="从 Huggingface 上传的 GSM8K 基准数据集", metadata={ "date": "2025-03-10", "type": "benchmark" } ) ``` ```python for idx, row in df.iterrows(): langfuse.create_dataset_item( dataset_name=langfuse_dataset_name, input={"text": row["question"]}, expected_output={"text": row["answer"]}, metadata={"source_index": idx} ) if idx >= 9: # 仅上传前 10 个项目用于演示 break ``` ![Dataset items in Langfuse](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit2/example-dataset.png) #### 在数据集上运行智能体 我们定义一个辅助函数 `run_smolagent()`,它: 1. 启动一个 OpenTelemetry 跨度(span) 2. 在提示上运行我们的智能体 3. 
在 Langfuse 中记录追踪 ID 然后,我们遍历每个数据集项目,运行智能体,并将追踪链接到数据集项目。如果需要,我们还可以附加一个快速评估分数。 ```python from opentelemetry.trace import format_trace_id from smolagents import (CodeAgent, InferenceClientModel, LiteLLMModel) # 示例:使用 InferenceClientModel 或 LiteLLMModel 访问 openai、anthropic、gemini 等模型: model = InferenceClientModel() agent = CodeAgent( tools=[], model=model, add_base_tools=True ) def run_smolagent(question): with tracer.start_as_current_span("Smolagent-Trace") as span: span.set_attribute("langfuse.tag", "dataset-run") output = agent.run(question) current_span = trace.get_current_span() span_context = current_span.get_span_context() trace_id = span_context.trace_id formatted_trace_id = format_trace_id(trace_id) langfuse_trace = langfuse.trace( id=formatted_trace_id, input=question, output=output ) return langfuse_trace, output ``` ```python dataset = langfuse.get_dataset(langfuse_dataset_name) # 针对每个数据集项目运行我们的智能体(上面限制为前 10 个) for item in dataset.items: langfuse_trace, output = run_smolagent(item.input["text"]) # 将追踪链接到数据集项目以供分析 item.link( langfuse_trace, run_name="smolagent-notebook-run-01", run_metadata={ "model": model.model_id } ) # 可选地,存储一个快速评估分数用于演示 langfuse_trace.score( name="<example_eval>", value=1, comment="这是一条评论" ) # 刷新数据以确保所有遥测数据都已发送 langfuse.flush() ``` 你可以用不同的配置重复这个过程: - 模型 (OpenAI GPT, 本地 LLM 等) - 工具 (使用搜索 vs. 不使用搜索) - 提示 (不同的系统消息) 然后在你的可观测性工具中并排比较它们: ![Dataset run overview](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit2/dataset_runs.png) ![Dataset run comparison](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit2/dataset-run-comparison.png) ## 结语 在这个 notebook 中,我们介绍了如何: 1. **设置可观测性** 使用 smolagents + OpenTelemetry 导出器 2. **检查检测** 通过运行一个简单的智能体 3. **捕获详细指标** (成本、延迟等) 通过可观测性工具 4. **收集用户反馈** 通过 Gradio 界面 5. **使用 LLM 作为评判者** 自动评估输出 6. **执行离线评估** 使用基准数据集 🤗 编代码愉快!
agents-course/units/zh-CN/bonus_unit2/monitoring-and-evaluating-agents-notebook.mdx/0
{ "file_path": "agents-course/units/zh-CN/bonus_unit2/monitoring-and-evaluating-agents-notebook.mdx", "repo_id": "agents-course", "token_count": 9851 }
17
# Observe: 整合反馈以反思和调整 Observations(观察)是**智能体感知其行动结果的方式**。 它们提供关键信息,为智能体的思考过程提供燃料并指导未来行动。 这些是**来自环境的信号**——无论是 API 返回的数据、错误信息还是系统日志——它们指导着下一轮的思考循环。 在观察阶段,智能体会: - **收集反馈**:接收数据或确认其行动是否成功 - **附加结果**:将新信息整合到现有上下文中,有效更新记忆 - **调整策略**:使用更新后的上下文来优化后续思考和行动 例如,当天气 API 返回数据*"partly cloudy, 15°C, 60% humidity"*(局部多云,15°C,60% 湿度)时,该观察结果会被附加到智能体的记忆(位于提示末尾)。 智能体随后利用这些信息决定是否需要额外数据,或是否准备好提供最终答案。 这种**迭代式反馈整合确保智能体始终保持与目标的动态对齐**,根据现实结果不断学习和调整。 这些观察**可能呈现多种形式**,从读取网页文本到监测机械臂位置。这可以视为工具"日志",为行动执行提供文本反馈。 | 观察类型 | 示例 | |---------------------|---------------------------------------------------------------------------| | 系统反馈 | 错误信息、成功通知、状态码 | | 数据变更 | 数据库更新、文件系统修改、状态变更 | | 环境数据 | 传感器读数、系统指标、资源使用情况 | | 响应分析 | API 响应、查询结果、计算输出 | | 基于时间的事件 | 截止时间到达、定时任务完成 | ## 结果如何被附加? 执行操作后,框架按以下步骤处理: 1. **解析操作** 以识别要调用的函数和使用的参数 2. **执行操作** 3. **将结果附加** 作为 **Observation** --- 至此我们已经学习了智能体的思考-行动-观察循环。 如果某些概念仍显模糊,不必担心——我们将在后续单元中重访并深化这些概念。 现在,是时候通过编写你的第一个智能体来实践所学知识了!
agents-course/units/zh-CN/unit1/observations.mdx/0
{ "file_path": "agents-course/units/zh-CN/unit1/observations.mdx", "repo_id": "agents-course", "token_count": 1851 }
18
# 目录

此 LlamaIndex 框架大纲是课程第 2 单元的一部分。您可以在 hf.co/learn 上访问有关 LlamaIndex 的第 2 单元 👉 <a href="https://hf.co/learn/agents-course/unit2/llama-index/introduction">这里</a>

| 标题                             | 描述                                                                                   |
| -------------------------------- | ------------------------------------------------------------------------------------ |
| [介绍](introduction.mdx)         | LlamaIndex 简介                                                                        |
| [LlamaHub](llama-hub.mdx)        | LlamaHub:集成、智能体和工具注册表                                                     |
| [组件](components.mdx)           | 组件:工作流的构件                                                                      |
| [工具](tools.mdx)                | 工具:如何在 LlamaIndex 中构建工具                                                      |
| [测验 1](quiz1.mdx)              | 测验 1                                                                                 |
| [智能体](agents.mdx)             | 智能体:如何在 LlamaIndex 中构建智能体                                                  |
| [工作流](workflows.mdx)          | 工作流:由按顺序执行的组件组成的一系列步骤和事件                                        |
| [测验 2](quiz2.mdx)              | 测验 2                                                                                 |
| [结论](conclusion.mdx)           | 结论                                                                                   |
agents-course/units/zh-CN/unit2/llama-index/README.md/0
{ "file_path": "agents-course/units/zh-CN/unit2/llama-index/README.md", "repo_id": "agents-course", "token_count": 1137 }
19
# 小测验(不计分)[[quiz2]] 现在该测试您对*代码智能体*、*工具调用智能体*和*工具*章节的理解了。本测验为可选且不计分。 --- ### Q1: 使用 `@tool` 装饰器创建工具与创建 `Tool` 的子类之间的主要区别是什么? 以下哪个陈述最能描述这两种定义工具方法的区别? <Question choices={[ { text: "使用 <code>@tool</code> 装饰器是检索类工具的强制要求,而 <code>Tool</code> 的子类仅用于文本生成任务", explain: "两种方法都适用于任何类型的工具,包括检索类和文本生成类工具。", }, { text: "推荐使用 <code>@tool</code> 装饰器创建简单的基于函数的工具,而 <code>Tool</code> 的子类能为复杂功能或自定义元数据提供更大灵活性", explain: "正确。装饰器方法更简单,但子类化允许更定制化的行为。", correct: true }, { text: "<code>@tool</code> 只能用于多智能体系统,而创建 <code>Tool</code> 的子类适用于单智能体场景", explain: "所有智能体(单或多)都可以使用这两种方法定义工具,没有此类限制。", }, { text: "用 <code>@tool</code> 装饰函数可以替代文档字符串,而子类工具必须不包含文档字符串", explain: "两种方法都需要清晰的文档字符串。装饰器不会替代它们,子类仍然可以包含文档字符串。", } ]} /> --- ### Q2: CodeAgent 如何使用 ReAct(推理+行动)方法处理多步骤任务? 哪个陈述正确描述了 CodeAgent 执行系列步骤来解决任务的方式? <Question choices={[ { text: "它将每个步骤传递给多智能体系统中的不同智能体,然后合并结果", explain: "尽管多智能体系统可以分配任务,但 CodeAgent 本身可以使用 ReAct 独立处理多个步骤。", }, { text: "它将所有操作存储为 JSON 格式以便解析,然后一次性执行所有操作", explain: "此行为匹配 ToolCallingAgent 的基于 JSON 的方法,而非 CodeAgent。", }, { text: "它循环执行以下流程:编写内部思考、生成 Python 代码、执行代码并记录结果,直到得出最终答案", explain: "正确。这描述了 CodeAgent 使用的 ReAct 模式,包括迭代推理和代码执行。", correct: true }, { text: "它依赖视觉模块验证代码输出后才能继续下一步", explain: "smolagents 支持视觉能力,但它们不是 CodeAgent 或 ReAct 方法的默认要求。", } ]} /> --- ### Q3: 在 Hugging Face Hub 上共享工具的主要优势是什么? 选择开发者可能上传和共享自定义工具的最佳原因。 <Question choices={[ { text: "它会自动将工具与 MultiStepAgent 集成以实现检索增强生成", explain: "共享工具不会自动设置检索或多步逻辑,只是使工具可用。", }, { text: "它允许他人在无需额外设置的情况下发现、重用并将您的工具集成到他们的 smolagents 中", explain: "正确。在 Hub 上共享使工具可供任何人(包括您自己)快速下载和重用。", correct: true }, { text: "它确保只有 CodeAgent 可以调用该工具,而 ToolCallingAgent 无法调用", explain: "CodeAgent 和 ToolCallingAgent 都可以调用共享工具,没有基于智能体类型的限制。", }, { text: "它会将您的工具转换为具备完整视觉能力的图像处理函数", explain: "工具共享不会自动改变工具功能或增加视觉能力。", } ]} /> --- ### Q4: ToolCallingAgent 在执行操作方面与 CodeAgent 有何不同? 
选择准确描述 ToolCallingAgent 工作方式的选项。 <Question choices={[ { text: "ToolCallingAgent 仅兼容多智能体系统,而 CodeAgent 可以单独运行", explain: "两种智能体都可以单独使用或作为多智能体系统的一部分。", }, { text: "ToolCallingAgent 将所有推理委托给单独的检索智能体,然后返回最终答案", explain: "ToolCallingAgent 仍然使用主 LLM 进行推理,不完全依赖检索智能体。", }, { text: "ToolCallingAgent 输出指定工具调用和参数的 JSON 指令,这些指令会被解析并执行", explain: "正确。ToolCallingAgent 使用 JSON 方法来定义工具调用。", correct: true }, { text: "ToolCallingAgent 仅适用于单步任务,在调用一个工具后自动停止", explain: "ToolCallingAgent 可以像 CodeAgent 一样根据需要执行多个步骤。", } ]} /> --- ### Q5: smolagents 默认工具箱包含哪些内容?为什么要使用它? 哪个陈述最能体现 smolagents 默认工具箱的目的和内容? <Question choices={[ { text: "它提供常用工具集(如 DuckDuckGo 搜索、PythonInterpreterTool 和最终答案工具)用于快速原型开发", explain: "正确。默认工具箱包含这些现成工具,便于在构建智能体时快速集成。", correct: true }, { text: "它默认仅支持基于视觉的任务(如图像分类或 OCR)", explain: "尽管 smolagents 可以集成视觉功能,但默认工具箱并非专门面向视觉任务。", }, { text: "它专门为多智能体系统设计,与单 CodeAgent 不兼容", explain: "默认工具箱适用于任何智能体类型,包括单智能体和多智能体设置。", }, { text: "它添加了基于检索的高级功能,支持来自向量存储的大规模问答", explain: "虽然可以构建检索工具,但默认工具箱不会自动提供高级 RAG 功能。", } ]} /> --- 恭喜完成测验!🎉 如果有任何问题让您感到困难,请重新访问*代码智能体*、*工具调用智能体*或*工具*章节以加强理解。如果您表现出色,那么您已踏上构建强大 smolagents 应用的道路!
agents-course/units/zh-CN/unit2/smolagents/quiz2.mdx/0
{ "file_path": "agents-course/units/zh-CN/unit2/smolagents/quiz2.mdx", "repo_id": "agents-course", "token_count": 3839 }
20
# 动手实践 现在你已经准备好更深入地创建你的最终智能体了,让我们看看如何提交它以供评审。 ## 数据集 此排行榜使用的数据集包含从 GAIA **验证**集的一级问题中所提取的 20 个问题。 这些问题是根据回答问题所需的工具和步骤数量进行筛选的。 根据 GAIA 基准目前的状况,我们认为让你尝试在一级问题中达到 30% 的准确率是一个相对好的测试。 <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit4/leaderboard%20GAIA%2024%3A04%3A2025.png" alt="GAIA current status!" /> ## 流程 现在你脑海中最大的问题可能是:“我如何开始提交?” 对于本单元,我们创建了一个 API,你可以通过它获取问题并发送答案进行评分。 以下是路由摘要(请参阅[实时文档](https://agents-course-unit4-scoring.hf.space/docs)以获取交互式详细信息): * **`GET /questions`**:检索过滤后的评估问题完整列表。 * **`GET /random-question`**:从列表中获取一个随机问题。 * **`GET /files/{task_id}`**:下载与给定任务 ID 关联的特定文件。 * **`POST /submit`**:提交智能体的答案,计算分数,并更新排行榜。 提交函数将以**完全匹配**的方式将答案与真实答案进行比较,因此请好好作出提示!GAIA 团队在此处分享了一个代理提示示例(为了本课程的目的,请确保你的提交中不包含文本“FINAL ANSWER”,只需让你的代理回复答案而无需其他内容)。 🎨 **打造你自己的模板!** 为了演示与 API 交互的过程,我们提供了一个[基本模板](https://huggingface.co/spaces/agents-course/Final_Assignment_Template)作为开始的部分。 我们请您随意的去更改、添加或完全重构它!以最适合你的方法和具有创造力的方式修改它。这也是我们所**积极鼓励**的方式。 为了提交此模板,请需要计算 API 所需的 3 个内容: * **用户名:** 你的 Hugging Face 用户名(此处通过 Gradio 登录获取),用于识别你的提交。 * **代码链接 (`agent_code`):** 指向你的 Hugging Face Space 代码(`.../tree/main`)的 URL,用于验证目的,因此请保持你的 Space 为公开。 * **答案 (`answers`):** 你的代理生成的响应列表(`{"task_id": ..., "submitted_answer": ...}`),用于评分。 因此,我们鼓励你从复制此[模板](https://huggingface.co/spaces/agents-course/Final_Assignment_Template)到你自己的 huggingface 个人资料开始。 🏆 在[此处](https://huggingface.co/spaces/agents-course/Students_leaderboard)查看排行榜 *温馨提示:此排行榜仅供娱乐!我们知道可以在没有完全验证的情况下提交分数。如果我们看到太多没有公开链接支持的高分,我们可能需要审查、调整或删除一些条目,以保持排行榜的有用性。* 排行榜将显示你的 Space 代码库链接,由于此排行榜仅供学生使用,如果你获得了一个令你骄傲的分数,请保持你的 Space 公开。 <iframe src="https://agents-course-students-leaderboard.hf.space" frameborder="0" width="850" height="450" ></iframe>
agents-course/units/zh-CN/unit4/hands-on.mdx/0
{ "file_path": "agents-course/units/zh-CN/unit4/hands-on.mdx", "repo_id": "agents-course", "token_count": 2053 }
21
# Pytorch cheatsheet {{#include ../../../README.md:cheatsheet}}
candle/candle-book/src/guide/cheatsheet.md/0
{ "file_path": "candle/candle-book/src/guide/cheatsheet.md", "repo_id": "candle", "token_count": 26 }
22
# MNIST So we now have downloaded the MNIST parquet files, let's put them in a simple struct. ```rust,ignore {{#include ../lib.rs:book_training_3}} ``` The parsing of the file and putting it into single tensors requires the dataset to fit the entire memory. It is quite rudimentary, but simple enough for a small dataset like MNIST.
candle/candle-book/src/training/mnist.md/0
{ "file_path": "candle/candle-book/src/training/mnist.md", "repo_id": "candle", "token_count": 93 }
23
use crate::benchmarks::{BenchDevice, BenchDeviceHandler}; use candle_core::{DType, Device, Tensor}; use criterion::{black_box, criterion_group, Criterion, Throughput}; use std::time::Instant; fn run(a: &Tensor) { a.sqrt().unwrap(); } fn run_unary_benchmark(c: &mut Criterion, device: &Device, dtype: DType, name: &str) { let b = 1; let m = 1024; let k = 1024; let tensor = Tensor::arange(0.0f32, (b * m * k) as f32, device) .unwrap() .to_dtype(dtype) .unwrap() .reshape((b, m, k)) .unwrap(); let flops = b * m * k * dtype.size_in_bytes(); let mut group = c.benchmark_group(device.bench_name(name)); group.throughput(Throughput::Bytes(flops as u64)); group.bench_function("iter", move |b| { b.iter_custom(|iters| { let start = Instant::now(); for _i in 0..iters { run(black_box(&tensor)); } device.sync().unwrap(); start.elapsed() }) }); group.finish(); } fn criterion_benchmark(c: &mut Criterion) { let handler = BenchDeviceHandler::new().unwrap(); for device in handler.devices { for dtype in [DType::F32, DType::BF16, DType::F16] { let name = format!("sqrt_{:?}", dtype); run_unary_benchmark(c, &device, dtype, &name); } } } criterion_group!(benches, criterion_benchmark);
candle/candle-core/benches/benchmarks/unary.rs/0
{ "file_path": "candle/candle-core/benches/benchmarks/unary.rs", "repo_id": "candle", "token_count": 656 }
24
use super::Cpu; use core::arch::wasm32::*; pub struct CurrentCpu {} const STEP: usize = 16; const EPR: usize = 4; const ARR: usize = STEP / EPR; impl Cpu<ARR> for CurrentCpu { type Unit = v128; type Array = [v128; ARR]; const STEP: usize = STEP; const EPR: usize = EPR; fn n() -> usize { ARR } unsafe fn zero() -> Self::Unit { f32x4_splat(0.0) } unsafe fn zero_array() -> Self::Array { [Self::zero(); ARR] } unsafe fn from_f32(v: f32) -> Self::Unit { f32x4_splat(v) } unsafe fn load(mem_addr: *const f32) -> Self::Unit { v128_load(mem_addr as *mut v128) } unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit { f32x4_add(a, b) } unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit { f32x4_add(f32x4_mul(b, c), a) } unsafe fn vec_store(mem_addr: *mut f32, a: Self::Unit) { v128_store(mem_addr as *mut v128, a); } unsafe fn vec_reduce(mut x: Self::Array, y: *mut f32) { for i in 0..ARR / 2 { x[2 * i] = f32x4_add(x[2 * i], x[2 * i + 1]); } for i in 0..ARR / 4 { x[4 * i] = f32x4_add(x[4 * i], x[4 * i + 2]); } for i in 0..ARR / 8 { x[8 * i] = f32x4_add(x[8 * i], x[8 * i + 4]); } *y = f32x4_extract_lane::<0>(x[0]) + f32x4_extract_lane::<1>(x[0]) + f32x4_extract_lane::<2>(x[0]) + f32x4_extract_lane::<3>(x[0]); } }
candle/candle-core/src/cpu/simd128.rs/0
{ "file_path": "candle/candle-core/src/cpu/simd128.rs", "repo_id": "candle", "token_count": 839 }
25
//! Tensor Layouts including contiguous or sparse strides use crate::{Error, Result, Shape}; #[derive(Debug, PartialEq, Eq, Clone)] pub struct Layout { shape: Shape, // The strides are given in number of elements and not in bytes. stride: Vec<usize>, start_offset: usize, } impl Layout { pub fn new(shape: Shape, stride: Vec<usize>, start_offset: usize) -> Self { Self { shape, stride, start_offset, } } pub fn contiguous_with_offset<S: Into<Shape>>(shape: S, start_offset: usize) -> Self { let shape = shape.into(); let stride = shape.stride_contiguous(); Self { shape, stride, start_offset, } } pub fn contiguous<S: Into<Shape>>(shape: S) -> Self { Self::contiguous_with_offset(shape, 0) } pub fn dims(&self) -> &[usize] { self.shape.dims() } /// The dimension size for a specified dimension index. pub fn dim<D: crate::shape::Dim>(&self, dim: D) -> Result<usize> { let dim = dim.to_index(&self.shape, "dim")?; Ok(self.dims()[dim]) } pub fn shape(&self) -> &Shape { &self.shape } pub fn stride(&self) -> &[usize] { &self.stride } pub fn start_offset(&self) -> usize { self.start_offset } /// Returns the appropriate start and stop offset if the data is stored in a C /// contiguous (aka row major) way. pub fn contiguous_offsets(&self) -> Option<(usize, usize)> { if self.is_contiguous() { let start_o = self.start_offset; Some((start_o, start_o + self.shape.elem_count())) } else { None } } /// Returns true if the data is stored in a C contiguous (aka row major) way. /// Note that this does not implies that the start offset is 0 or that there are no extra /// elements at the end of the storage. pub fn is_contiguous(&self) -> bool { self.shape.is_contiguous(&self.stride) } /// Returns true if the data is stored in a Fortran contiguous (aka column major) way. 
pub fn is_fortran_contiguous(&self) -> bool { self.shape.is_fortran_contiguous(&self.stride) } pub fn narrow(&self, dim: usize, start: usize, len: usize) -> Result<Self> { let dims = self.shape().dims(); if dim >= dims.len() { Err(Error::DimOutOfRange { shape: self.shape().clone(), dim: dim as i32, op: "narrow", } .bt())? } if start + len > dims[dim] { Err(Error::NarrowInvalidArgs { shape: self.shape.clone(), dim, start, len, msg: "start + len > dim_len", } .bt())? } let mut dims = dims.to_vec(); dims[dim] = len; Ok(Self { shape: Shape::from(dims), stride: self.stride.clone(), start_offset: self.start_offset + self.stride[dim] * start, }) } pub fn transpose(&self, dim1: usize, dim2: usize) -> Result<Self> { let rank = self.shape.rank(); if rank <= dim1 || rank <= dim2 { Err(Error::UnexpectedNumberOfDims { expected: usize::max(dim1, dim2), got: rank, shape: self.shape().clone(), } .bt())? } let mut stride = self.stride().to_vec(); let mut dims = self.shape().dims().to_vec(); dims.swap(dim1, dim2); stride.swap(dim1, dim2); Ok(Self { shape: Shape::from(dims), stride, start_offset: self.start_offset, }) } pub fn permute(&self, idxs: &[usize]) -> Result<Self> { let is_permutation = idxs.len() == self.shape.rank() && (0..idxs.len()).all(|i| idxs.contains(&i)); if !is_permutation { crate::bail!( "dimension mismatch in permute, tensor {:?}, dims: {:?}", self.dims(), idxs ) } let stride = self.stride(); let dims = self.shape().dims(); let mut perm_stride = stride.to_vec(); let mut perm_dims = dims.to_vec(); for (i, &idx) in idxs.iter().enumerate() { perm_stride[i] = stride[idx]; perm_dims[i] = dims[idx]; } Ok(Self { shape: Shape::from(perm_dims), stride: perm_stride, start_offset: self.start_offset, }) } pub fn broadcast_as<S: Into<Shape>>(&self, shape: S) -> Result<Self> { let shape = shape.into(); if shape.rank() < self.shape().rank() { return Err(Error::BroadcastIncompatibleShapes { src_shape: self.shape().clone(), dst_shape: shape, } .bt()); } let added_dims = 
shape.rank() - self.shape().rank(); let mut stride = vec![0; added_dims]; for (&dst_dim, (&src_dim, &src_stride)) in shape.dims()[added_dims..] .iter() .zip(self.dims().iter().zip(self.stride())) { let s = if dst_dim == src_dim { src_stride } else if src_dim != 1 { return Err(Error::BroadcastIncompatibleShapes { src_shape: self.shape().clone(), dst_shape: shape, } .bt()); } else { 0 }; stride.push(s) } Ok(Self { shape, stride, start_offset: self.start_offset, }) } pub(crate) fn strided_index(&self) -> crate::StridedIndex<'_> { crate::StridedIndex::from_layout(self) } pub(crate) fn strided_blocks(&self) -> crate::StridedBlocks<'_> { let mut block_len = 1; let mut contiguous_dims = 0; // These are counted from the right. for (&stride, &dim) in self.stride().iter().zip(self.dims().iter()).rev() { if stride != block_len { break; } block_len *= dim; contiguous_dims += 1; } let index_dims = self.dims().len() - contiguous_dims; if index_dims == 0 { crate::StridedBlocks::SingleBlock { start_offset: self.start_offset, len: block_len, } } else { let block_start_index = crate::StridedIndex::new( &self.dims()[..index_dims], &self.stride[..index_dims], self.start_offset, ); crate::StridedBlocks::MultipleBlocks { block_start_index, block_len, } } } // Returns the contiguous offsets with broadcast if applicable. 
pub(crate) fn offsets_b(&self) -> Option<ContiguousOffsetsWithBroadcast> { let mut left_broadcast = 1; let mut right_broadcast = 1; let strides = self.stride(); let dims = self.dims(); let mut start_cont = 0; let mut end_cont = dims.len(); for (&s, &d) in strides.iter().zip(dims.iter()) { if s != 0 { break; } start_cont += 1; left_broadcast *= d; } if start_cont == dims.len() { return Some(ContiguousOffsetsWithBroadcast { start: self.start_offset, len: 1, left_broadcast, right_broadcast: 1, }); } for (&s, &d) in strides.iter().zip(dims.iter()).rev() { if s != 0 { break; } end_cont -= 1; right_broadcast *= d; } // Check that the inner dims are contiguous let strides = &strides[start_cont..end_cont]; let dims = &dims[start_cont..end_cont]; let mut len = 1; for (&stride, &dim) in strides.iter().zip(dims.iter()).rev() { if stride != len { return None; } len *= dim; } Some(ContiguousOffsetsWithBroadcast { start: self.start_offset, len, left_broadcast, right_broadcast, }) } } #[derive(Debug, Clone, PartialEq, Eq)] pub struct ContiguousOffsetsWithBroadcast { pub start: usize, pub len: usize, pub left_broadcast: usize, pub right_broadcast: usize, }
candle/candle-core/src/layout.rs/0
{ "file_path": "candle/candle-core/src/layout.rs", "repo_id": "candle", "token_count": 4460 }
26
//! Code for GGML and GGUF files use crate::{Context, CpuStorage, DType, Device, Result, Shape, Storage, Tensor}; use k_quants::*; use std::borrow::Cow; #[cfg(target_feature = "avx2")] pub mod avx; mod dummy_cuda; mod dummy_metal; pub mod ggml_file; pub mod gguf_file; pub mod k_quants; #[cfg(feature = "metal")] pub mod metal; #[cfg(not(feature = "metal"))] mod metal { pub use super::dummy_metal::*; } #[cfg(feature = "cuda")] pub mod cuda; #[cfg(not(feature = "cuda"))] mod cuda { pub use super::dummy_cuda::*; } #[cfg(target_feature = "neon")] pub mod neon; #[cfg(target_feature = "simd128")] pub mod simd128; pub mod utils; use half::{bf16, f16}; pub use k_quants::GgmlType; pub struct QTensor { storage: QStorage, shape: Shape, } impl Device { fn qzeros(&self, elem_count: usize, dtype: GgmlDType) -> Result<QStorage> { match self { Device::Cpu => { let storage = dtype.cpu_zeros(elem_count); Ok(QStorage::Cpu(storage)) } Device::Metal(metal) => { let storage = metal::QMetalStorage::zeros(metal, elem_count, dtype)?; Ok(QStorage::Metal(storage)) } Device::Cuda(cuda) => { let storage = cuda::QCudaStorage::zeros(cuda, elem_count, dtype)?; Ok(QStorage::Cuda(storage)) } } } } pub enum QStorage { Cpu(Box<dyn QuantizedType>), Metal(metal::QMetalStorage), Cuda(cuda::QCudaStorage), } impl QStorage { fn block_size(&self) -> usize { match self { QStorage::Cpu(storage) => storage.block_size(), QStorage::Metal(storage) => storage.dtype().block_size(), QStorage::Cuda(storage) => storage.dtype().block_size(), } } fn dtype(&self) -> GgmlDType { match self { QStorage::Cpu(storage) => storage.dtype(), QStorage::Metal(storage) => storage.dtype(), QStorage::Cuda(storage) => storage.dtype(), } } fn device(&self) -> Device { match self { QStorage::Cpu(_storage) => Device::Cpu, QStorage::Metal(storage) => Device::Metal(storage.device().clone()), QStorage::Cuda(storage) => Device::Cuda(storage.device().clone()), } } fn size_in_bytes(&self) -> usize { match self { QStorage::Cpu(storage) => 
storage.storage_size_in_bytes(), QStorage::Metal(storage) => storage.storage_size_in_bytes(), QStorage::Cuda(storage) => storage.storage_size_in_bytes(), } } fn quantize(&mut self, src: &Storage) -> Result<()> { match (self, src) { (QStorage::Cpu(storage), Storage::Cpu(src)) => { storage.from_float(src.as_slice::<f32>()?)?; } (QStorage::Metal(storage), Storage::Metal(src)) => storage.quantize(src)?, (QStorage::Cuda(storage), Storage::Cuda(src)) => storage.quantize(src)?, _ => crate::bail!("Invalid dequantize storage locations do not match"), } Ok(()) } fn dequantize(&self, elem_count: usize) -> Result<Storage> { match self { QStorage::Cpu(storage) => Ok(Storage::Cpu(storage.dequantize(elem_count)?)), QStorage::Metal(storage) => Ok(Storage::Metal(storage.dequantize(elem_count)?)), QStorage::Cuda(storage) => Ok(Storage::Cuda(storage.dequantize(elem_count)?)), } } fn data(&self) -> Result<Cow<'_, [u8]>> { match self { QStorage::Cpu(storage) => { let data_ptr = storage.as_ptr(); let size_in_bytes = storage.storage_size_in_bytes(); let data = unsafe { std::slice::from_raw_parts(data_ptr, size_in_bytes) }; Ok(Cow::from(data)) } QStorage::Metal(_) | QStorage::Cuda(_) => { crate::bail!("not implemented"); } } } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum GgmlDType { F32, F16, BF16, Q4_0, Q4_1, Q5_0, Q5_1, Q8_0, Q8_1, Q2K, Q3K, Q4K, Q5K, Q6K, Q8K, } impl GgmlDType { pub(crate) fn from_u32(u: u32) -> Result<Self> { let dtype = match u { 0 => Self::F32, 1 => Self::F16, 2 => Self::Q4_0, 3 => Self::Q4_1, 6 => Self::Q5_0, 7 => Self::Q5_1, 8 => Self::Q8_0, 9 => Self::Q8_1, 10 => Self::Q2K, 11 => Self::Q3K, 12 => Self::Q4K, 13 => Self::Q5K, 14 => Self::Q6K, 15 => Self::Q8K, // https://github.com/ggerganov/ggml/blob/29d87fc6676e7ed0cdfdec0804b06001d9c2bb44/include/ggml.h#L389 30 => Self::BF16, _ => crate::bail!("unknown dtype for tensor {u}"), }; Ok(dtype) } pub(crate) fn to_u32(self) -> u32 { match self { Self::F32 => 0, Self::F16 => 1, Self::Q4_0 => 2, 
Self::Q4_1 => 3, Self::Q5_0 => 6, Self::Q5_1 => 7, Self::Q8_0 => 8, Self::Q8_1 => 9, Self::Q2K => 10, Self::Q3K => 11, Self::Q4K => 12, Self::Q5K => 13, Self::Q6K => 14, Self::Q8K => 15, // https://github.com/ggerganov/ggml/blob/29d87fc6676e7ed0cdfdec0804b06001d9c2bb44/include/ggml.h#L389 Self::BF16 => 30, } } /// The block dtype pub fn cpu_zeros(&self, elem_count: usize) -> Box<dyn QuantizedType> { match self { Self::F32 => Box::new(vec![f32::zeros(); elem_count]), Self::F16 => Box::new(vec![f16::zeros(); elem_count]), Self::Q4_0 => Box::new(vec![BlockQ4_0::zeros(); elem_count / BlockQ4_0::BLCK_SIZE]), Self::Q4_1 => Box::new(vec![BlockQ4_1::zeros(); elem_count / BlockQ4_1::BLCK_SIZE]), Self::Q5_0 => Box::new(vec![BlockQ5_0::zeros(); elem_count / BlockQ5_0::BLCK_SIZE]), Self::Q5_1 => Box::new(vec![BlockQ5_1::zeros(); elem_count / BlockQ5_1::BLCK_SIZE]), Self::Q8_0 => Box::new(vec![BlockQ8_0::zeros(); elem_count / BlockQ8_0::BLCK_SIZE]), Self::Q8_1 => Box::new(vec![BlockQ8_1::zeros(); elem_count / BlockQ8_1::BLCK_SIZE]), Self::Q2K => Box::new(vec![BlockQ2K::zeros(); elem_count / BlockQ2K::BLCK_SIZE]), Self::Q3K => Box::new(vec![BlockQ3K::zeros(); elem_count / BlockQ3K::BLCK_SIZE]), Self::Q4K => Box::new(vec![BlockQ4K::zeros(); elem_count / BlockQ4K::BLCK_SIZE]), Self::Q5K => Box::new(vec![BlockQ5K::zeros(); elem_count / BlockQ5K::BLCK_SIZE]), Self::Q6K => Box::new(vec![BlockQ6K::zeros(); elem_count / BlockQ6K::BLCK_SIZE]), Self::Q8K => Box::new(vec![BlockQ8K::zeros(); elem_count / BlockQ8K::BLCK_SIZE]), Self::BF16 => Box::new(vec![bf16::zeros(); elem_count]), } } /// The type size for blocks in bytes. 
pub fn type_size(&self) -> usize { use k_quants::*; match self { Self::F32 => 4, Self::F16 | Self::BF16 => 2, Self::Q4_0 => std::mem::size_of::<BlockQ4_0>(), Self::Q4_1 => std::mem::size_of::<BlockQ4_1>(), Self::Q5_0 => std::mem::size_of::<BlockQ5_0>(), Self::Q5_1 => std::mem::size_of::<BlockQ5_1>(), // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L932 Self::Q8_0 => std::mem::size_of::<BlockQ8_0>(), Self::Q8_1 => std::mem::size_of::<BlockQ8_1>(), Self::Q2K => std::mem::size_of::<BlockQ2K>(), Self::Q3K => std::mem::size_of::<BlockQ3K>(), Self::Q4K => std::mem::size_of::<BlockQ4K>(), Self::Q5K => std::mem::size_of::<BlockQ5K>(), Self::Q6K => std::mem::size_of::<BlockQ6K>(), Self::Q8K => std::mem::size_of::<BlockQ8K>(), } } /// The block size, i.e. the number of elements stored in each block. pub fn block_size(&self) -> usize { match self { Self::F32 => 1, Self::F16 | Self::BF16 => 1, Self::Q4_0 => k_quants::QK4_0, Self::Q4_1 => k_quants::QK4_1, Self::Q5_0 => k_quants::QK5_0, Self::Q5_1 => k_quants::QK5_1, Self::Q8_0 => k_quants::QK8_0, Self::Q8_1 => k_quants::QK8_1, Self::Q2K | Self::Q3K | Self::Q4K | Self::Q5K | Self::Q6K | Self::Q8K => k_quants::QK_K, } } } // A version of GgmlType without `vec_dot` so that it can be dyn boxed. 
pub trait QuantizedType: Send + Sync { fn dtype(&self) -> GgmlDType; fn matmul_t(&self, mkn: (usize, usize, usize), lhs: &[f32], dst: &mut [f32]) -> Result<()>; fn dequantize(&self, elem_count: usize) -> Result<CpuStorage>; fn storage_size_in_bytes(&self) -> usize; fn as_ptr(&self) -> *const u8; fn block_size(&self) -> usize; #[allow(clippy::wrong_self_convention)] fn from_float(&mut self, xs: &[f32]) -> Result<()>; fn size(&self) -> usize; } impl<T: k_quants::GgmlType + Send + Sync> QuantizedType for Vec<T> { fn matmul_t(&self, mkn: (usize, usize, usize), lhs: &[f32], dst: &mut [f32]) -> Result<()> { k_quants::matmul(mkn, lhs, self.as_slice(), dst) } fn size(&self) -> usize { self.len() * core::mem::size_of::<T>() } fn from_float(&mut self, xs: &[f32]) -> Result<()> { T::from_float(xs, self) } fn dtype(&self) -> GgmlDType { T::DTYPE } fn block_size(&self) -> usize { T::BLCK_SIZE } fn dequantize(&self, elem_count: usize) -> Result<CpuStorage> { let mut ys = vec![0.0f32; elem_count]; T::to_float(self.as_slice(), &mut ys)?; Ok(CpuStorage::F32(ys)) } fn storage_size_in_bytes(&self) -> usize { self.len() * std::mem::size_of::<T>() } fn as_ptr(&self) -> *const u8 { self.as_ptr() as *const u8 } } impl std::fmt::Debug for QTensor { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "QTensor[{:?}; {:?}]", self.shape, self.dtype()) } } fn check_shape(shape: &Shape, block_size: usize) -> Result<()> { let dims = shape.dims(); if dims.is_empty() { crate::bail!("scalar tensor cannot be quantized {shape:?}") } if dims[dims.len() - 1] % block_size != 0 { crate::bail!( "quantized tensor must have their last dim divisible by block size {shape:?} {}", block_size ) } Ok(()) } impl QTensor { pub fn new<S: Into<Shape>>(storage: QStorage, shape: S) -> Result<Self> { let shape = shape.into(); check_shape(&shape, storage.block_size())?; Ok(Self { storage, shape }) } pub fn quantize(src: &Tensor, dtype: GgmlDType) -> Result<Self> { let shape = src.shape(); let 
block_size = dtype.block_size(); check_shape(shape, block_size)?; let src = src.to_dtype(crate::DType::F32)?.flatten_all()?; let elem_count = shape.elem_count(); if elem_count % block_size != 0 { crate::bail!( "tensor size ({shape:?}) is not divisible by block size {}", block_size ) } let mut storage = src.device().qzeros(elem_count, dtype)?; storage.quantize(&src.storage())?; Ok(Self { storage, shape: shape.clone(), }) } pub fn dtype(&self) -> GgmlDType { self.storage.dtype() } pub fn device(&self) -> Device { self.storage.device() } pub fn rank(&self) -> usize { self.shape.rank() } pub fn shape(&self) -> &Shape { &self.shape } pub fn dequantize(&self, device: &Device) -> Result<Tensor> { let storage = self.storage.dequantize(self.shape.elem_count())?; let none = crate::op::BackpropOp::none(); crate::tensor::from_storage(storage, self.shape.clone(), none, false).to_device(device) } pub fn dequantize_f16(&self, device: &Device) -> Result<Tensor> { // In the CUDA case, we have a specialized kernel as this can be useful for volta // architectures. https://github.com/huggingface/candle/issues/2136 match &self.storage { QStorage::Cuda(s) => { let s = s.dequantize_f16(self.shape.elem_count())?; let none = crate::op::BackpropOp::none(); crate::tensor::from_storage(Storage::Cuda(s), self.shape.clone(), none, false) .to_device(device) } _ => { let s = self.dequantize(device)?.to_dtype(crate::DType::F16)?; Ok(s) } } } pub fn storage_size_in_bytes(&self) -> usize { self.storage.size_in_bytes() } pub fn data(&self) -> Result<Cow<'_, [u8]>> { self.storage.data() } } #[derive(Clone, Debug)] pub enum QMatMul { QTensor(std::sync::Arc<QTensor>), Tensor(Tensor), TensorF16(Tensor), } thread_local! { static DEQUANTIZE_ALL: bool = { match std::env::var("CANDLE_DEQUANTIZE_ALL") { Ok(s) => { !s.is_empty() && s != "0" }, Err(_) => false, } } } thread_local! 
{ static DEQUANTIZE_ALL_F16: bool = { match std::env::var("CANDLE_DEQUANTIZE_ALL_F16") { Ok(s) => { !s.is_empty() && s != "0" }, Err(_) => false, } } } impl QMatMul { pub fn from_arc(qtensor: std::sync::Arc<QTensor>) -> Result<Self> { let dequantize = match qtensor.dtype() { GgmlDType::F32 | GgmlDType::F16 | GgmlDType::BF16 => true, _ => DEQUANTIZE_ALL.with(|b| *b), }; let t = if dequantize { let tensor = qtensor.dequantize(&qtensor.device())?; Self::Tensor(tensor) } else if DEQUANTIZE_ALL_F16.with(|b| *b) { let tensor = qtensor.dequantize_f16(&qtensor.device())?; Self::TensorF16(tensor) } else { Self::QTensor(qtensor) }; Ok(t) } pub fn from_qtensor(qtensor: QTensor) -> Result<Self> { Self::from_arc(std::sync::Arc::new(qtensor)) } pub fn dequantize_f16(&self) -> Result<Tensor> { match self { Self::QTensor(t) => t.dequantize_f16(&t.device()), Self::Tensor(t) => t.to_dtype(DType::F16), Self::TensorF16(t) => Ok(t.clone()), } } pub fn forward_via_f16(&self, xs: &Tensor) -> Result<Tensor> { let w = self.dequantize_f16()?; let in_dtype = xs.dtype(); let w = match *xs.dims() { [b1, b2, _, _] => w.broadcast_left((b1, b2))?.t()?, [bsize, _, _] => w.broadcast_left(bsize)?.t()?, _ => w.t()?, }; xs.to_dtype(DType::F16)?.matmul(&w)?.to_dtype(in_dtype) } } impl crate::CustomOp1 for QTensor { fn name(&self) -> &'static str { "qmatmul" } fn cpu_fwd( &self, storage: &crate::CpuStorage, layout: &crate::Layout, ) -> Result<(crate::CpuStorage, Shape)> { if !layout.is_contiguous() { crate::bail!("input tensor is not contiguous {layout:?}") } let src_shape = layout.shape(); // self is transposed so n is first then k. 
let (n, k) = self.shape.dims2()?; if src_shape.rank() < 2 { crate::bail!("input tensor has only one dimension {layout:?}") } let mut dst_shape = src_shape.dims().to_vec(); let last_k = dst_shape.pop().context("empty dst_shape")?; if last_k != k { crate::bail!("input tensor {layout:?} incompatible with {:?}", self.shape) } dst_shape.push(n); let dst_shape = Shape::from(dst_shape); #[allow(clippy::infallible_destructuring_match)] let self_storage = match &self.storage { QStorage::Cpu(storage) => storage, QStorage::Metal(_) | QStorage::Cuda(_) => crate::bail!("Invalid storage"), }; let slice = storage.as_slice::<f32>()?; let slice = &slice[layout.start_offset()..layout.start_offset() + src_shape.elem_count()]; let mut dst_storage = vec![0f32; dst_shape.elem_count()]; self_storage.matmul_t((dst_shape.elem_count() / n, k, n), slice, &mut dst_storage)?; Ok((crate::CpuStorage::F32(dst_storage), dst_shape)) } fn metal_fwd( &self, storage: &crate::MetalStorage, layout: &crate::Layout, ) -> Result<(crate::MetalStorage, Shape)> { let self_storage = match &self.storage { QStorage::Metal(metal) => metal, _ => unreachable!("Cannot call metal matmul on non metal QTensor"), }; self_storage.fwd(&self.shape, storage, layout) } fn cuda_fwd( &self, storage: &crate::CudaStorage, layout: &crate::Layout, ) -> Result<(crate::CudaStorage, Shape)> { let self_storage = match &self.storage { QStorage::Cuda(cuda) => cuda, _ => unreachable!("Cannot call cuda matmul on non cuda QTensor"), }; self_storage.fwd(&self.shape, storage, layout) } } impl crate::Module for QMatMul { fn forward(&self, xs: &Tensor) -> Result<Tensor> { match self { Self::QTensor(t) => xs.apply_op1_no_bwd(t.as_ref()), Self::Tensor(w) => { let w = match *xs.dims() { [b1, b2, _, _] => w.broadcast_left((b1, b2))?.t()?, [bsize, _, _] => w.broadcast_left(bsize)?.t()?, _ => w.t()?, }; xs.matmul(&w) } Self::TensorF16(w) => { let in_dtype = xs.dtype(); let w = match *xs.dims() { [b1, b2, _, _] => w.broadcast_left((b1, b2))?.t()?, 
[bsize, _, _] => w.broadcast_left(bsize)?.t()?, _ => w.t()?, }; xs.to_dtype(DType::F16)?.matmul(&w)?.to_dtype(in_dtype) } } } }
candle/candle-core/src/quantized/mod.rs/0
{ "file_path": "candle/candle-core/src/quantized/mod.rs", "repo_id": "candle", "token_count": 9685 }
27
use anyhow::Result; use candle_core::{test_device, test_utils, Device, IndexOp, Tensor}; /* This test is based on the following script. import torch torch.manual_seed(4242) t = torch.randn((1, 4, 5)) w = torch.randn((2, 4, 3)) print(t.flatten()) print(w.flatten()) res = torch.nn.functional.conv1d(t, w) print(res.flatten()) res = torch.nn.functional.conv1d(t, w, padding=1) print(res.flatten()) w_t = w.transpose(0, 1) res = torch.nn.functional.conv_transpose1d(t, w_t) print(res.shape) print(res) res = torch.nn.functional.conv_transpose1d(t, w_t, groups=2) print(res.shape) print(res) */ fn conv1d(dev: &Device) -> Result<()> { let t = Tensor::new( &[ 0.4056f32, -0.8689, -0.0773, -1.5630, 1.2279, -0.9287, -1.7030, 0.1370, 0.1866, 0.4145, 1.8025, -0.1536, 2.2013, -0.6836, 0.2477, 1.3127, -0.6957, 0.3278, -1.0124, 0.5599, ], dev, )? .reshape((1, 4, 5))?; let w = Tensor::new( &[ -0.8404f32, -0.3490, 0.0130, 1.3123, 0.1763, -1.9249, 1.4270, 0.9421, 0.8670, -0.7181, -1.1111, 0.8869, -1.2429, 1.8357, 1.6052, -1.3844, 0.3951, -1.2036, 0.6686, 1.6261, -0.6451, -0.0840, -1.4247, 0.5512, ], dev, )? .reshape((2, 4, 3))?; let res = t.conv1d(&w, 0, 1, 1, 1)?; assert_eq!(res.dims(), [1, 2, 3]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [2.6357, -1.3336, 4.1393, -1.1784, 3.5675, 0.5069] ); let res = t.conv1d(&w, /*padding*/ 1, 1, 1, 1)?; assert_eq!(res.dims(), [1, 2, 5]); // Same as pytorch default padding: use zeros. assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [2.4509, 2.6357, -1.3336, 4.1393, 0.5657, 1.8091, -1.1784, 3.5675, 0.5069, 3.3352] ); let res = { let t = Tensor::cat(&[&t.zeros_like()?, &t, &t.zeros_like()?], 0)?; t.conv1d(&w, /*padding*/ 1, 1, 1, 1)? }; assert_eq!(res.dims(), [3, 2, 5]); // Same as pytorch default padding: use zeros. assert_eq!( test_utils::to_vec1_round(&res.i(0)?.flatten_all()?, 4)?, [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.] 
); assert_eq!( test_utils::to_vec1_round(&res.i(1)?.flatten_all()?, 4)?, [2.4509, 2.6357, -1.3336, 4.1393, 0.5657, 1.8091, -1.1784, 3.5675, 0.5069, 3.3352] ); let w = w.transpose(0, 1)?; // The CPU kernels applied in the contiguous and non contiguous cases are different. for w in [w.clone(), w.contiguous()?] { let res = t.conv_transpose1d(&w, 0, 0, 1, 1, 1)?; assert_eq!(res.dims(), [1, 2, 7]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [ 0.0699, -1.2899, 8.3018, 5.5873, 2.4572, -2.6143, -0.0706, 1.8765, 4.8318, 1.1538, 4.7076, -5.9745, -0.8276, 1.621 ], ); let res = t.conv_transpose1d(&w, 0, 0, 1, 1, 2)?; assert_eq!(res.dims(), [1, 4, 7]); assert_eq!( test_utils::to_vec2_round(&res.squeeze(0)?, 4)?, [ [-1.5596, -1.8099, 2.0407, 4.8764, -0.1743, -0.735, -0.7819], [0.7816, 3.8152, -0.5926, 2.2515, -5.1844, -0.3157, 1.4721], [1.6295, 0.52, 6.2611, 0.7109, 2.6315, -1.8793, 0.7113], [1.0949, 1.0166, 1.7464, 2.4561, -0.79, -0.5119, 0.1488] ] ); } Ok(()) } fn conv1d_small(dev: &Device) -> Result<()> { let t = Tensor::new(&[0.4056f32, -0.8689, -0.0773, -1.5630], dev)?.reshape((1, 1, 4))?; let w = Tensor::new(&[1f32, 0., 0.], dev)?.reshape((1, 1, 3))?; let res = t.conv1d(&w, 0, 1, 1, 1)?; assert_eq!(res.dims(), [1, 1, 2]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [0.4056, -0.8689] ); let res = t.conv1d(&w, /*padding*/ 1, 1, 1, 1)?; assert_eq!(res.dims(), [1, 1, 4]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [0.0, 0.4056, -0.8689, -0.0773], ); Ok(()) } /* This test is based on the following script. 
import torch
torch.manual_seed(4242)

t = torch.randn((1, 4, 5, 5))
w = torch.randn((2, 4, 3, 3))
print(t.flatten())
print(w.flatten())
res = torch.nn.functional.conv2d(t, w)
print(res.flatten())

w_t = w.transpose(0, 1)
res = torch.nn.functional.conv_transpose2d(t, w_t)
print(res.shape)
print(res)

res = torch.nn.functional.conv2d(t, w, dilation=2)
print(res.shape)
print(res[0])

res = torch.nn.functional.conv_transpose2d(t, w_t, dilation=2)
print(res.shape)
print(res)
*/
// Checks conv2d / conv_transpose2d, with and without dilation, against golden
// values from the PyTorch script above (same seed, same tensor contents).
fn conv2d(dev: &Device) -> Result<()> {
    // Input: batch 1, 4 channels, 5x5 spatial (values copied from torch).
    let t = Tensor::new(
        &[
            0.4056f32, -0.8689, -0.0773, -1.5630, -2.8012, -1.5059, 0.3972, 1.0852, 0.4997,
            3.0616, 1.6541, 0.0964, -0.8338, -1.6523, -0.8323, -0.1699, 0.0823, 0.3526, 0.6843,
            0.2395, 1.2279, -0.9287, -1.7030, 0.1370, 0.6047, 0.3770, -0.6266, 0.3529, 2.2013,
            -0.6836, 0.2477, 1.3127, -0.2260, 0.2622, -1.2974, -0.8140, -0.8404, -0.3490, 0.0130,
            1.3123, 1.7569, -0.3956, -1.8255, 0.1727, -0.3538, 2.6941, 1.0529, 0.4219, -0.2071,
            1.1586, 0.4717, 0.3865, -0.5690, -0.5010, -0.1310, 0.7796, 0.6630, -0.2021, 2.6090,
            0.2049, 0.6466, -0.5042, -0.0603, -1.6538, -1.2429, 1.8357, 1.6052, -1.3844, 0.3323,
            -1.3712, 0.9634, -0.4799, -0.6451, -0.0840, -1.4247, 0.5512, -0.1747, -0.5509,
            -0.3742, 0.3790, -0.4431, -0.4720, -0.7890, 0.2620, 0.7875, 0.5377, -0.6779, -0.8088,
            1.9098, 1.2006, -0.8, -0.4983, 1.5480, 0.8265, -0.1025, 0.5138, 0.5748, 0.3821,
            -0.4607, 0.0085,
        ],
        dev,
    )?;
    // Kernel: 2 output channels, 4 input channels, 3x3.
    let w = Tensor::new(
        &[
            -0.9325f32, 0.6451, -0.8537, 0.2378, 0.8764, -0.1832, 0.2987, -0.6488, -0.2273,
            -2.4184, -0.1192, -0.4821, -0.5079, -0.5766, -2.4729, 1.6734, 0.4558, 0.2851, 1.1514,
            -0.9013, 1.0662, -0.1817, -0.0259, 0.1709, 0.5367, 0.7513, 0.8086, -2.2586, -0.5027,
            0.9141, -1.3086, -1.3343, -1.5669, -0.1657, 0.7958, 0.1432, 0.3896, -0.4501, 0.1667,
            0.0714, -0.0952, 1.2970, -0.1674, -0.3178, 1.0677, 0.3060, 0.7080, 0.1914, 1.1679,
            -0.3602, 1.9265, -1.8626, -0.5112, -0.0982, 0.2621, 0.6565, 0.5908, 1.0089, -0.1646,
            1.8032, -0.6286, 0.2016, -0.3370, 1.2555, 0.8009, -0.6488, -0.4652, -1.5685, 1.5860,
            0.5583, 0.4623, 0.6026,
        ],
        dev,
    )?;
    let t = t.reshape((1, 4, 5, 5))?;
    let w = w.reshape((2, 4, 3, 3))?;
    let res = t.conv2d(&w, 0, 1, 1, 1)?;
    assert_eq!(res.dims(), [1, 2, 3, 3]);
    assert_eq!(
        test_utils::to_vec1_round(&res.flatten_all()?, 4)?,
        [
            -4.2812, 2.0923, 5.2187, 7.5184, 0.752, -14.9426, 10.0087, 4.391, 0.2918, 1.6715,
            10.389, 3.6023, -4.2808, 0.2672, 5.3646, -5.2023, -2.1955, -9.4075
        ]
    );
    // Batch of 3 (zeros / t / zeros): zero inputs yield zero outputs, the
    // middle sample must match the single-sample result above.
    let res = {
        let t = Tensor::cat(&[&t.zeros_like()?, &t, &t.zeros_like()?], 0)?;
        t.conv2d(&w, 0, 1, 1, 1)?
    };
    assert_eq!(res.dims(), [3, 2, 3, 3]);
    assert_eq!(
        test_utils::to_vec1_round(&res.i(0)?.flatten_all()?, 4)?,
        [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]
    );
    assert_eq!(
        test_utils::to_vec1_round(&res.i(1)?.flatten_all()?, 4)?,
        [
            -4.2812, 2.0923, 5.2187, 7.5184, 0.752, -14.9426, 10.0087, 4.391, 0.2918, 1.6715,
            10.389, 3.6023, -4.2808, 0.2672, 5.3646, -5.2023, -2.1955, -9.4075
        ]
    );
    // Transposed convolution (kernel transposed as torch's conv_transpose2d
    // expects an (in, out, kh, kw) layout).
    let res = t.conv_transpose2d(&w.transpose(0, 1)?, 0, 0, 1, 1)?;
    assert_eq!(res.dims(), [1, 2, 7, 7]);
    assert_eq!(
        test_utils::to_vec3_round(&res.i(0)?, 4)?,
        [
            [
                [-1.9918, 2.6797, -0.4599, -1.6037, 1.4131, -2.4012, 2.9277],
                [1.8016, -3.5361, 1.0757, 3.5395, -8.2168, -3.2023, 0.5375],
                [0.8243, 1.8675, 7.8929, -4.0746, -6.4415, 5.1139, 1.6889],
                [0.2722, 8.9679, 3.3477, 1.8514, -4.2896, -3.8228, -7.5632],
                [-8.5412, -5.8142, -7.1587, -1.6095, 0.4651, 0.2748, -2.0985],
                [2.0833, -0.6482, -12.1692, -4.1284, -2.9765, -0.0656, -4.5114],
                [5.307, 2.6957, 2.3087, 1.0478, 0.7808, -1.1519, -0.9579]
            ],
            [
                [1.089, 0.1872, -0.6408, -0.9897, 0.8503, 1.1019, -0.9211],
                [-0.1741, -0.2915, 4.2472, 1.9417, 1.65, 0.6303, -4.7131],
                [1.6555, 2.4026, -2.9293, 2.9953, 0.5328, 3.5873, -0.9621],
                [-1.4289, -3.2787, 4.1747, -6.0341, -4.6341, -5.7945, 4.142],
                [7.5973, 6.4431, 5.9872, 2.1639, -8.6566, 3.3143, -3.4059],
                [-0.8775, -3.048, 11.6543, 0.6442, 2.3218, -0.4765, 1.1516],
                [-5.5423, -2.5188, 1.0754, -0.0563, -2.9386, -1.1504, 1.0171]
            ]
        ]
    );
    // Dilations.
    let res = t.conv2d(&w, 0, 1, 2, 1)?;
    assert_eq!(res.dims(), [1, 2, 1, 1]);
    assert_eq!(
        test_utils::to_vec1_round(&res.flatten_all()?, 4)?,
        [2.45, -2.3504],
    );
    // Transpose and dilations.
    let res = t.conv_transpose2d(&w.transpose(0, 1)?, 0, 0, 1, 2)?;
    assert_eq!(res.dims(), [1, 2, 9, 9]);
    assert_eq!(
        test_utils::to_vec3_round(&res.i(0)?, 4)?,
        [
            [
                [-1.9918, 3.1652, -0.6778, -4.3442, 4.4351, 0.6652, -3.0124, -0.6031, 2.9277],
                [2.7036, -1.7156, -0.3969, 1.0516, 1.6381, -2.8886, -0.205, 2.4682, -1.0499],
                [-0.9459, 3.1631, 3.707, -4.8369, -8.5166, -1.4496, -2.7559, -3.2698, 1.4376],
                [-0.2157, 3.7786, -2.0252, -4.2633, 3.6731, -1.5142, 5.9391, -0.2622, -0.141],
                [-6.8121, -3.1744, 1.5945, 3.0637, -9.6088, 1.4446, 2.9489, -3.0082, -7.3822],
                [0.2371, 3.3303, 0.3861, 2.2646, -4.6784, 4.1235, -0.0109, 0.3176, -0.03],
                [-2.5339, -2.9564, -3.4518, -4.4594, -9.1873, -1.9709, -0.4676, 0.51, -3.5024],
                [4.007, 0.3067, -2.2954, 1.1105, -0.1992, 1.6372, -2.9268, 0.2807, -1.2787],
                [5.307, 1.1317, 1.3518, 0.9049, 3.8116, -0.4075, -0.8874, -0.2241, -0.9579]
            ],
            [
                [1.089, -0.6483, 0.0726, -0.4752, -1.3283, 1.7103, 1.0703, 0.1076, -0.9211],
                [-0.8629, 0.1376, 0.3202, 2.0955, 0.9696, 2.8988, -1.0012, 1.5049, -0.1278],
                [1.9286, -1.5255, -2.9563, 2.4589, 3.3611, -0.6951, 0.3525, -1.7724, -5.9861],
                [1.1226, 2.1561, 3.6417, 4.7546, -0.692, 4.4126, -5.1902, 6.0805, 2.3185],
                [1.0111, 0.3604, 0.6432, -3.6605, 7.9517, -9.2955, -5.2988, -3.7803, -2.0642],
                [3.3172, -1.7967, -3.6576, -2.0942, 1.3158, 0.112, -1.7405, 2.9167, 0.7957],
                [5.1001, 1.8995, -1.8639, 1.1262, 9.9629, 2.683, -3.6319, -1.1607, 0.5856],
                [-4.8445, -0.5642, 4.2317, 0.0856, 1.2267, -0.5712, 1.736, 1.0997, 0.6908],
                [-5.5423, -1.1831, -1.2176, 0.0843, 0.0446, -0.7545, -2.4798, -0.0827, 1.0171]
            ]
        ]
    );
    Ok(())
}

/* This test is based on the following script.
import torch
torch.manual_seed(4242)

t = torch.randn((1, 2, 3, 3))
w = torch.randn((1, 2, 1, 1))
print(t.flatten())
print(w.flatten())
res = torch.nn.functional.conv2d(t, w)
print(res.flatten())

w_t = w.transpose(0, 1)
res = torch.nn.functional.conv_transpose2d(t, w_t)
print(res.shape)
print(res.flatten())

t_t = w.transpose(0, 1)
res = torch.nn.functional.conv_transpose2d(t_t, w)
print(res.shape)
print(res.flatten())
*/
// Small 1x1-kernel conv2d / conv_transpose2d checks, including padding and
// transposing the input instead of the kernel. Golden values from the
// PyTorch script above.
fn conv2d_small(dev: &Device) -> Result<()> {
    let t = Tensor::new(
        &[
            0.4056f32, -0.8689, 0.6843, 0.2395, 1.2279, -0.9287, -1.7030, 0.1370, 0.1866,
            0.4145, -0.6266, 0.3529, 2.2013, -0.6836, 0.2477, 1.3127, -0.6957, 0.3278,
        ],
        dev,
    )?;
    let w = Tensor::new(&[-0.9259f32, 1.3017], dev)?;
    let t = t.reshape((1, 2, 3, 3))?;
    let w = w.reshape((1, 2, 1, 1))?;
    let res = t.conv2d(&w, 0, 1, 1, 1)?;
    assert_eq!(res.dims(), [1, 1, 3, 3]);
    assert_eq!(
        test_utils::to_vec1_round(&res.flatten_all()?, 4)?,
        [0.164, -0.0111, -0.1742, 2.6437, -2.0268, 1.1823, 3.2855, -1.0324, 0.2539]
    );
    // Padding of 2 surrounds the 3x3 result with a two-wide border of zeros.
    let res = t.conv2d(&w, 2, 1, 1, 1)?;
    assert_eq!(res.dims(), [1, 1, 7, 7]);
    assert_eq!(
        test_utils::to_vec1_round(&res.flatten_all()?, 4)?,
        [
            0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
            0.1640, -0.0111, -0.1742, 0.0, 0.0, 0.0, 0.0, 2.6437, -2.0268, 1.1823, 0.0, 0.0,
            0.0, 0.0, 3.2855, -1.0324, 0.2539, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
            0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
        ]
    );
    // With a 1x1 kernel the transposed convolution matches the forward one.
    let res = t.conv_transpose2d(&w.transpose(0, 1)?, 0, 0, 1, 1)?;
    assert_eq!(res.dims(), [1, 1, 3, 3]);
    assert_eq!(
        test_utils::to_vec1_round(&res.flatten_all()?, 4)?,
        [0.164, -0.0111, -0.1742, 2.6437, -2.0268, 1.1823, 3.2855, -1.0324, 0.2539],
    );
    // Transposing the input rather than the kernel (matches the t_t case in
    // the torch script).
    let res = t.transpose(0, 1)?.conv_transpose2d(&w, 0, 0, 1, 1)?;
    assert_eq!(res.dims(), [2, 2, 3, 3]);
    assert_eq!(
        test_utils::to_vec1_round(&res.flatten_all()?, 4)?,
        [
            -0.3755, 0.8045, -0.6336, -0.2218, -1.1369, 0.8599, 1.5768, -0.1268, -0.1728,
            0.528, -1.131, 0.8908, 0.3118, 1.5984, -1.2089, -2.2168, 0.1783, 0.2429, -0.3838,
            0.5802, -0.3268, -2.0382, 0.6329, -0.2293, -1.2154, 0.6441, -0.3035, 0.5396,
            -0.8156, 0.4594, 2.8654, -0.8898, 0.3224, 1.7087, -0.9056, 0.4267
        ]
    );
    Ok(())
}

// Degenerate case: kernel covers the whole 3x3 input with all-one weights,
// so the single output value is just the sum of the input elements.
fn conv2d_smaller(dev: &Device) -> Result<()> {
    let t = Tensor::new(
        &[
            0.4056f32, -0.8689, 0.6843, 0.2395, 1.2279, -0.9287, -1.7030, 0.1370, 0.1866,
        ],
        dev,
    )?;
    let w = Tensor::new(&[1f32, 1., 1., 1., 1., 1., 1., 1., 1.], dev)?;
    let t = t.reshape((1, 1, 3, 3))?;
    let w = w.reshape((1, 1, 3, 3))?;
    let res = t.conv2d(&w, 0, 1, 1, 1)?;
    assert_eq!(res.dims(), [1, 1, 1, 1]);
    assert_eq!(
        test_utils::to_vec1_round(&res.flatten_all()?, 4)?,
        [-0.6197]
    );
    Ok(())
}

/* This test is based on the following script.
import torch
torch.manual_seed(4242)

t = torch.randn((1, 2, 4, 2))
w = torch.randn((1, 2, 1, 1))
print(t.flatten())
print(w.flatten())
res = torch.nn.functional.conv2d(t, w)
print(res.flatten())
*/
// Checks that non-square spatial dimensions (4x2) are handled correctly.
fn conv2d_non_square(dev: &Device) -> Result<()> {
    let t = Tensor::new(
        &[
            0.4056f32, -0.8689, -0.0773, -1.5630, -2.8012, -1.5059, 0.3972, 1.0852, 0.4997,
            3.0616, 1.6541, 0.0964, -0.8338, -1.6523, -0.8323, -0.1699,
        ],
        dev,
    )?;
    let w = Tensor::new(&[-1.1351f32, 1.3841], dev)?;
    let t = t.reshape((1, 2, 4, 2))?;
    let w = w.reshape((1, 2, 1, 1))?;
    let res = t.conv2d(&w, 0, 1, 1, 1)?;
    assert_eq!(res.dims(), [1, 1, 4, 2]);
    assert_eq!(
        test_utils::to_vec1_round(&res.flatten_all()?, 4)?,
        [0.2312, 5.2238, 2.3772, 1.9076, 2.0256, -0.5776, -1.6028, -1.467]
    );
    Ok(())
}

/*
import torch
torch.manual_seed(4242)

t = torch.randn((1, 4, 5, 5), requires_grad=True)
w = torch.randn((2, 4, 3, 3), requires_grad=True)
print(t.flatten())
print(w.flatten())
res = torch.nn.functional.conv2d(t, w)
print(res.flatten())
loss = (res ** 2).sum()
print(loss)
loss.backward()
print(t.grad.shape)
print(t.grad.flatten())
print(w.grad.shape)
print(w.grad.flatten())

t.grad.zero_()
w.grad.zero_()
res = torch.nn.functional.conv2d(t, w, stride=2)
print(res.flatten())
loss = (res ** 2).sum()
print(loss)
loss.backward()
print(t.grad.shape)
print(t.grad[0])
print(w.grad.shape)
print(w.grad[0])
*/
// Checks the backward pass of conv2d / conv_transpose2d: gradients w.r.t.
// both the input and the kernel are compared against values produced by the
// PyTorch script above (loss = sum of squared outputs in every case).
fn conv2d_grad(dev: &Device) -> Result<()> {
    // conv-transposes are not implemented for metal
    use candle_core::Var;
    // Same input/kernel values as in the conv2d test above, but as Vars so
    // that gradients can be tracked.
    let t = Var::from_slice(
        &[
            0.4056f32, -0.8689, -0.0773, -1.5630, -2.8012, -1.5059, 0.3972, 1.0852, 0.4997,
            3.0616, 1.6541, 0.0964, -0.8338, -1.6523, -0.8323, -0.1699, 0.0823, 0.3526, 0.6843,
            0.2395, 1.2279, -0.9287, -1.7030, 0.1370, 0.6047, 0.3770, -0.6266, 0.3529, 2.2013,
            -0.6836, 0.2477, 1.3127, -0.2260, 0.2622, -1.2974, -0.8140, -0.8404, -0.3490, 0.0130,
            1.3123, 1.7569, -0.3956, -1.8255, 0.1727, -0.3538, 2.6941, 1.0529, 0.4219, -0.2071,
            1.1586, 0.4717, 0.3865, -0.5690, -0.5010, -0.1310, 0.7796, 0.6630, -0.2021, 2.6090,
            0.2049, 0.6466, -0.5042, -0.0603, -1.6538, -1.2429, 1.8357, 1.6052, -1.3844, 0.3323,
            -1.3712, 0.9634, -0.4799, -0.6451, -0.0840, -1.4247, 0.5512, -0.1747, -0.5509,
            -0.3742, 0.3790, -0.4431, -0.4720, -0.7890, 0.2620, 0.7875, 0.5377, -0.6779, -0.8088,
            1.9098, 1.2006, -0.8, -0.4983, 1.5480, 0.8265, -0.1025, 0.5138, 0.5748, 0.3821,
            -0.4607, 0.0085,
        ],
        (1, 4, 5, 5),
        dev,
    )?;
    let w = Var::from_slice(
        &[
            -0.9325f32, 0.6451, -0.8537, 0.2378, 0.8764, -0.1832, 0.2987, -0.6488, -0.2273,
            -2.4184, -0.1192, -0.4821, -0.5079, -0.5766, -2.4729, 1.6734, 0.4558, 0.2851, 1.1514,
            -0.9013, 1.0662, -0.1817, -0.0259, 0.1709, 0.5367, 0.7513, 0.8086, -2.2586, -0.5027,
            0.9141, -1.3086, -1.3343, -1.5669, -0.1657, 0.7958, 0.1432, 0.3896, -0.4501, 0.1667,
            0.0714, -0.0952, 1.2970, -0.1674, -0.3178, 1.0677, 0.3060, 0.7080, 0.1914, 1.1679,
            -0.3602, 1.9265, -1.8626, -0.5112, -0.0982, 0.2621, 0.6565, 0.5908, 1.0089, -0.1646,
            1.8032, -0.6286, 0.2016, -0.3370, 1.2555, 0.8009, -0.6488, -0.4652, -1.5685, 1.5860,
            0.5583, 0.4623, 0.6026,
        ],
        (2, 4, 3, 3),
        dev,
    )?;
    // Plain conv2d (stride 1): check loss, then both gradients.
    let res = t.conv2d(&w, 0, 1, 1, 1)?;
    let loss = res.sqr()?.sum_all()?;
    assert_eq!(test_utils::to_vec0_round(&loss, 2)?, 741.12f32);
    let grads = loss.backward()?;
    let grad_t = grads.get(&t).unwrap();
    let grad_w = grads.get(&w).unwrap();
    assert_eq!(grad_t.dims(), [1, 4, 5, 5]);
    assert_eq!(grad_w.dims(), [2, 4, 3, 3]);
    assert_eq!(
        test_utils::to_vec1_round(&grad_t.flatten_all()?, 2)?,
        [
            9.29, -2.84, -5.71, 3.38, -7.71, -19.15, 7.02, 29.1, 9.34, 34.73, -22.87, 24.35,
            -39.88, -14.01, 21.08, 9.94, 13.63, -34.68, 11.21, -6.26, 7.72, -6.32, -16.64,
            -1.08, -20.22, 21.73, -0.37, -4.06, 5.82, -3.65, -30.73, 14.55, 87.7, 31.6, 4.53,
            -89.78, -75.37, -57.43, -7.56, 92.96, 18.79, -4.63, -159.75, -42.47, -47.26, 52.88,
            37.32, 49.0, 12.82, 2.01, -8.98, 20.18, 16.62, 12.06, 15.38, 20.0, 2.57, -15.22,
            72.62, -10.75, 2.25, -31.2, 3.75, -0.2, 9.76, -0.68, 5.21, -40.44, -22.59, -61.61,
            17.28, 20.41, 37.55, 5.23, 6.81, 23.54, 23.62, -9.99, -9.13, 4.87, -35.06, -26.1,
            63.48, 25.81, -39.21, -70.68, -46.96, 2.33, 41.81, 82.42, -28.63, -11.78, -35.33,
            -10.28, -28.57, -9.13, 7.21, -9.05, -9.62, -11.25
        ]
    );
    assert_eq!(
        test_utils::to_vec1_round(&grad_w.flatten_all()?, 2)?,
        [
            -28.92, -22.88, -141.23, 73.35, 61.07, 47.81, -20.0, -73.71, -41.82, -13.59, 21.5,
            28.72, 28.57, -46.85, -90.19, 143.61, 16.68, 7.43, 18.88, -90.81, -20.29, 54.79,
            82.63, 22.94, 77.81, -16.39, -13.2, 9.34, -40.39, -26.62, 5.33, -60.91, 9.09,
            -59.37, 7.08, 58.64, 5.55, 20.52, 2.5, -17.25, -6.8, 22.21, 30.15, -7.52, -37.46,
            5.67, 22.58, 9.03, 47.05, 17.61, 37.31, -98.13, -14.61, -4.8, -6.36, 44.69, 23.34,
            8.37, -13.52, 80.05, -34.24, -16.36, -12.31, 1.92, -33.62, -14.1, -49.23, -7.39,
            11.5, -9.98, 9.66, 29.6
        ]
    );

    // Same as before but with stride.
    let res = t.conv2d(&w, 0, 2, 1, 1)?;
    let loss = res.sqr()?.sum_all()?;
    assert_eq!(test_utils::to_vec0_round(&loss, 2)?, 277.16f32);
    let grads = loss.backward()?;
    let grad_t = grads.get(&t).unwrap();
    let grad_w = grads.get(&w).unwrap();
    assert_eq!(grad_t.dims(), [1, 4, 5, 5]);
    assert_eq!(grad_w.dims(), [2, 4, 3, 3]);
    assert_eq!(
        test_utils::to_vec3_round(&grad_t.i(0)?, 2)?,
        [
            [
                [9.29, -7.03, 0.94, 3.49, -7.71],
                [-1.8, -7.82, 8.9, 8.46, 7.43],
                [-25.84, 22.09, -19.27, -0.22, 1.69],
                [4.02, 18.53, -18.37, 2.3, -24.51],
                [7.72, -9.68, -12.34, 5.6, -20.22]
            ],
            [
                [21.73, 3.39, -18.27, 3.86, -3.65],
                [8.25, 3.73, 30.73, -8.61, -11.93],
                [-72.15, -15.36, -17.53, -12.32, -1.61],
                [-22.32, -7.79, -91.82, 6.44, -37.69],
                [52.88, 14.44, 42.75, 9.88, 2.01]
            ],
            [
                [-8.98, 9.91, 6.75, -4.68, 15.38],
                [4.93, -0.33, 9.94, -1.46, 14.78],
                [13.62, -30.63, 3.96, -3.58, -4.48],
                [-14.13, 1.19, -34.43, 3.08, -33.83],
                [17.28, 12.94, 31.83, -3.35, 6.81]
            ],
            [
                [23.54, 6.98, -24.52, 0.52, 4.87],
                [9.65, 6.18, 1.71, -25.23, -4.93],
                [-54.99, -23.66, 3.19, -3.73, 18.58],
                [-21.35, -10.39, -39.88, 28.73, -30.76],
                [-9.13, 11.12, -14.0, -8.23, -11.25]
            ]
        ]
    );
    assert_eq!(
        test_utils::to_vec3_round(&grad_w.i(0)?, 2)?,
        [
            [
                [28.34, -7.91, -45.75],
                [21.03, 3.86, 29.86],
                [0.72, -36.58, -35.28]
            ],
            [
                [-16.04, 11.53, -16.38],
                [29.62, -16.32, -48.35],
                [57.5, 28.29, 25.81]
            ],
            [
                [2.93, -19.6, 1.57],
                [27.15, 53.88, -24.64],
                [12.74, -22.6, -26.2]
            ],
            [
                [-0.18, -14.86, -6.82],
                [-19.55, -2.72, 45.9],
                [-2.54, 36.97, 27.11]
            ]
        ]
    );

    // Replicate the issue from https://github.com/huggingface/candle/issues/1212
    // Strided conv2d on a non-contiguous sub-view of t: gradients must only
    // flow to the sliced 4x4 region, the rest stays zero.
    let res = t.i((.., .., 0..4, 0..4))?.conv2d(&w, 0, 2, 1, 1)?;
    let loss = res.sqr()?.sum_all()?;
    assert_eq!(test_utils::to_vec0_round(&loss, 2)?, 21.12f32);
    let grads = loss.backward()?;
    let grad_t = grads.get(&t).unwrap();
    let grad_w = grads.get(&w).unwrap();
    assert_eq!(grad_t.dims(), [1, 4, 5, 5]);
    assert_eq!(grad_w.dims(), [2, 4, 3, 3]);
    assert_eq!(
        test_utils::to_vec3_round(&grad_t.i(0)?, 2)?,
        [
            [
                [9.29, -7.03, 7.87, 0.0, 0.0],
                [-1.8, -7.82, 5.9, 0.0, 0.0],
                [-3.12, 4.49, 5.52, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0]
            ],
            [
                [21.73, 3.39, 4.77, 0.0, 0.0],
                [8.25, 3.73, 27.61, 0.0, 0.0],
                [-20.55, -5.61, -2.77, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0]
            ],
            [
                [-8.98, 9.91, -7.15, 0.0, 0.0],
                [4.93, -0.33, 4.56, 0.0, 0.0],
                [-6.7, -5.76, -8.05, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0]
            ],
            [
                [23.54, 6.98, -10.0, 0.0, 0.0],
                [9.65, 6.18, 18.72, 0.0, 0.0],
                [3.29, -5.27, 0.79, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0]
            ]
        ]
    );
    assert_eq!(
        test_utils::to_vec3_round(&grad_w.i(0)?, 2)?,
        [
            [
                [-3.47, 7.44, 0.66],
                [12.89, -3.4, -9.29],
                [-14.16, -0.83, 7.14]
            ],
            [
                [-3.23, 5.37, -3.02],
                [-2.12, -11.24, 1.94],
                [6.97, 7.2, 2.99]
            ],
            [
                [-4.04, -3.31, 4.87],
                [-6.68, -5.68, 1.73],
                [-5.54, 4.32, 0.52]
            ],
            [[-4.72, 1.5, 4.72], [3.79, 4.04, 6.76], [-4.6, 5.8, 6.93]]
        ]
    );

    // Conv Transpose 2d Test
    //tested against following python
    // import torch
    // torch.manual_seed(4242)
    // padding = 4
    // outpadding = 2
    // dilation = 3
    // stride = 3
    // input = torch.randn((1, 4, 7, 5), requires_grad=True)
    // kernel = torch.randn((4, 2, 3, 5), requires_grad=True)
    // print("input", input.flatten())
    // print("kernel", kernel.flatten())
    // res = torch.nn.functional.conv_transpose2d(
    //     input,
    //     kernel,
    //     stride=stride,
    //     padding=padding,
    //     dilation=dilation,
    //     output_padding=outpadding,
    // )
    // res.retain_grad()
    // print(res.shape)
    // loss = (res**2).sum()
    // print(loss)
    // loss.backward()
    // print(input.grad.shape)
    // print("input grad", torch.round(input.grad, decimals=1))
    // print(kernel.grad.shape)
    // print("kernel grad", torch.round(kernel.grad.flatten(), decimals=1))
    let padding = 4;
    let outpadding = 2;
    let dilation = 3;
    let stride = 3;
    let t = Var::from_slice(
        &[
            0.4056_f32, -0.8689, -0.0773, -1.5630, -2.8012, -1.5059, 0.3972, 1.0852, 0.4997,
            3.0616, 1.6541, 0.0964, -0.8338, -1.6523, -0.8323, -0.1699, 0.0823, 0.3526, 0.6843,
            0.2395, 1.2279, -0.9287, -1.7030, 0.1370, 0.6047, 0.3770, -0.6266, 0.3529, 2.2013,
            -0.6836, 0.2477, 1.3127, -0.2260, 0.2622, -1.2974, -0.8140, -0.8404, -0.3490,
            0.0130, 1.3123, 1.7569, -0.3956, -1.8255, 0.1727, -0.3538, 2.6941, 1.0529, 0.4219,
            -0.2071, 1.1586, 0.4717, 0.3865, -0.5690, -0.5010, -0.1310, 0.7796, 0.6630, -0.2021,
            2.6090, 0.2049, 0.6466, -0.5042, -0.0603, -1.6538, -1.2429, 1.8357, 1.6052, -1.3844,
            0.3323, -1.3712, 0.9634, -0.4799, -0.6451, -0.0840, -1.4247, 0.5512, -0.1747,
            -0.5509, -0.3742, 0.3790, -0.4431, -0.4720, -0.7890, 0.2620, 0.5411, -1.1715,
            -2.4997, 2.3249, -0.8912, -0.4733, -0.5701, -2.8888, -1.4112, -0.5471, -0.9234,
            -1.1660, 0.4189, -0.7465, -0.6473, 0.1402, 0.7875, 0.5377, -0.6779, -0.8088,
            -0.4864, -0.2312, 0.9279, 0.1264, 1.5480, 0.8265, -0.1025, 0.5138, -0.2512, 0.1576,
            1.2705, 0.3641, -0.9325, 0.6451, -0.8537, 0.2378, 0.1794, 0.2752, -0.3687, -1.1149,
            -0.1410, -0.5829, -0.0892, 1.4258, -2.2789, 0.5270, 0.1825, 1.7007, -0.5263,
            -0.2954, 0.4440, 0.5537, 0.3492, 0.6186, 1.6475, 0.2219,
        ],
        (1, 4, 7, 5),
        dev,
    )?;
    #[rustfmt::skip]
    let w = Var::from_slice(
        &[
            -1.1744_f32, 0.3266, 2.5893, 1.0142, 0.1763, 0.7752, 0.6604, 0.2029, -0.2145,
            0.7234, -0.3441, -1.5400, -0.6333, 0.6613, 0.2083, 0.6230, -1.7002, 0.3393, 0.4049,
            1.0762, 0.2723, 1.4181, 0.0029, -0.2122, 1.7668, 1.4168, 0.3320, -0.2719, 0.7932,
            -0.7204, 0.4447, 0.1211, 0.5908, 1.0089, -0.1646, 1.8033, -0.6286, 0.2016, -0.3370,
            1.2555, 0.8009, -0.6488, -0.4652, -1.5685, 1.5860, 0.5583, 0.4623, 0.6026, 0.8828,
            2.4990, 0.6811, -0.3369, 1.3320, 1.7669, -1.1067, 1.2958, -0.9415, -0.9655, -0.4462,
            0.7181, 0.5181, -1.1658, -1.8467, -0.7763, 1.2769, 0.8651, 0.9890, 1.5092, 0.7207,
            -0.8481, 0.7417, 0.3375, -1.2685, 1.4572, 1.0915, 0.1093, -0.8550, -0.5831, -0.6309,
            -0.2509, 0.5220, -0.0914, 0.7900, 0.1096, 0.3258, 0.2723, -1.0942, -0.3393, -0.1653,
            0.5732, -0.8014, 1.8194, -1.9023, 0.2127, 1.8636, -0.8979, 0.1927, -0.2778, 0.3105,
            0.0071, -1.1823, 0.2476, -0.7178, -1.3821, 1.0769, -0.4376, -0.9967, -0.1227,
            1.6197, -1.0604, 0.1372, 0.8141, -0.6163, 0.7304, -0.8285, 2.0636, -0.7176, 0.2495,
            -0.2581, -0.4478,
        ],
        (4, 2, 3, 5),
        dev,
    )?;
    let res = t.conv_transpose2d(&w, padding, outpadding, stride, dilation)?;
    let loss = res.sqr()?.sum_all()?;
    assert_eq!(test_utils::to_vec0_round(&loss, 0)?, 2904.0);
    let grads = loss.backward()?;

    let grad_t = grads.get(&t).unwrap();
    let grad_w = grads.get(&w).unwrap();
    assert_eq!(grad_t.dims(), [1, 4, 7, 5]);
    assert_eq!(grad_w.dims(), [4, 2, 3, 5]);

    // Comparison done at 1 decimal only, a handful of entries differ from
    // torch in the last rounded digit (flagged inline below).
    assert_eq!(
        test_utils::to_vec1_round(&grad_w.flatten_all()?, 1)?,
        [
            // torch gets 89.1
            -89.0, -135.3, 136.7, 102.0, -53.4, 117.9, 118.6, -43.9, -218.0, -58.5, -114.3,
            -150.0, -15.6, 172.1, 66.3, -64.3, -27.9, -19.8, 31.7, 62.1, 5.5, 92.6, 28.2, -29.6,
            55.9, 52.7, -72.7, -119.8, 53.8, -25.5, 128.8, 19.3, 68.0, 190.9, -64.1, -86.2,
            -111.2, 106.6, -67.7, 37.8, 115.9, 50.4, -77.7, -54.9, 22.3, -4.6, 89.8, 61.7,
            122.4, 192.6, -27.8, -104.6, 57.0, 166.4, 27.1, 6.1, 18.7, -93.2, 31.5, 168.2, -3.7,
            -99.5, -55.5, -10.8, 17.5, 20.8, 16.9, 43.8, 42.0, -89.2, 18.8, -9.6, -84.1, 212.6,
            19.7, -50.0, -52.0, -40.0, -166.6, -73.2, -10.8, -73.3, 31.5, -23.4, -79.3, -27.0,
            -84.4, -42.9, -20.3, 51.8, -16.7, 76.3, -120.5, -65.8, 96.5, -10.7, -45.9, -88.1,
            65.4, -7.0, -1.5, 92.8, -25.1, -114.2, -5.8, -14.8, -51.2, -20.7, 54.2, -79.8, 47.7,
            -29.2, -8.8, 53.5, -28.4, 85.0, -18.3, 107.0, 28.3, -71.8
        ]
    );
    assert_eq!(
        test_utils::to_vec3_round(&grad_t.i(0)?, 1)?,
        [
            [
                [32.3, -41.6, -24.0, 14.1, 17.6],
                [-11.8, 72.5, 87.6, 46.4, 61.5],
                [115.0, 108.5, -48.6, -63.4, -50.0],
                [51.3, 5.4, 31.3, 91.1, -30.9],
                [52.7, 92.8, -68.0, -47.0, 83.0],
                // pytorch gets -107.1
                [-10.2, -107.0, -5.4, 213.1, -31.4],
                [-2.4, 65.1, 9.2, -146.2, -24.2]
            ],
            [
                [-72.6, -63.9, -61.9, 45.3, 33.0],
                [79.3, -0.5, -26.2, 78.2, 42.7],
                [90.9, 141.6, 40.1, -62.7, 37.0],
                [32.8, 198.2, -0.8, -31.1, 27.3],
                // torch gets 48.0
                [34.5, 34.9, -47.9, 127.6, -12.3],
                [-61.4, -3.2, -2.9, -10.9, -16.6],
                [74.6, 60.1, -68.9, 34.5, -50.4]
            ],
            [
                [37.5, -56.9, -43.6, -13.5, -9.9],
                [40.0, 97.3, 28.6, 14.2, -30.1],
                [-22.3, -126.3, -68.8, -8.2, 26.1],
                [-32.9, 37.3, 108.5, -54.8, 29.6],
                [34.9, -176.9, -125.0, -28.3, -13.9],
                [-54.9, 142.6, 62.1, -80.4, -65.6],
                [7.4, -91.1, -67.6, 35.0, 39.7]
            ],
            [
                [-57.2, -40.9, -10.1, 32.6, 29.4],
                [18.7, -18.0, 29.5, -1.2, 59.2],
                [-14.0, -74.4, 19.8, -117.0, 58.2],
                [-21.8, 163.5, -71.1, -99.0, 80.9],
                [-58.9, -10.9, 93.8, -139.6, 98.0],
                // torch gets 54.5
                [-54.4, 135.3, 6.0, -79.1, 134.6],
                [27.5, -76.0, 43.4, -2.8, -7.8]
            ]
        ]
    );

    // Test the same, but then with the following properties, t & w are unmodified.
    let padding = 1;
    let outpadding = 1;
    let dilation = 1;
    let stride = 2;

    let res = t.conv_transpose2d(&w, padding, outpadding, stride, dilation)?;
    let loss = res.sqr()?.sum_all()?;
    assert_eq!(test_utils::to_vec0_round(&loss, 0)?, 3627.0); // torch gives 3626.8560

    let grads = loss.backward()?;

    let grad_t = grads.get(&t).unwrap();
    let grad_w = grads.get(&w).unwrap();
    assert_eq!(grad_t.dims(), [1, 4, 7, 5]);
    assert_eq!(grad_w.dims(), [4, 2, 3, 5]);

    #[rustfmt::skip]
    assert_eq!(
        test_utils::to_vec3_round(&grad_t.i(0)?, 1)?,
        [
            [
                [ 13.2, -40.7, -9.7, -47.3, -82.7],
                [ -98.2, 9.7, 57.7, -6.2, 180.7],
                [ 100.2, 24.1, 3.7, -100.5, -48.1],
                [ -0.3, 13.5, -2.9, 80.0, -49.8],
                [ 47.2, -25.6, -74.4, 61.2, -18.4],
                [ 4.6, -69.5, 27.9, 66.5, -88.1],
                // 4th column on next row; torch is 4.2
                [ -12.0, 79.2, -40.0, 4.1, -97.1],
            ],
            [
                [ -42.2, -36.5, -51.1, 7.5, 32.3],
                [ 74.1, -44.6, -68.8, 19.5, 7.7],
                [ 137.1, 54.2, 153.8, -58.0, 45.5],
                [ 24.4, -56.8, 9.7, -41.0, -14.5],
                [ -3.7, 72.6, 8.3, 134.8, 40.5],
                [ 43.2, -56.9, -47.5, -89.4, -95.4],
                [ 68.2, 108.1, -80.0, 57.0, -121.1]
            ],
            [
                [ 31.1, -11.4, -34.8, 33.1, -44.2],
                [ 29.4, -31.6, -40.2, 13.7, 13.1],
                [ -0.8, -83.8, -7.8, -17.3, 78.2],
                [ 12.0, -118.7, 137.5, -76.7, 50.8],
                [ -28.7, -114.2, -3.7, -96.3, -13.8],
                [ -31.8, 28.5, -14.3, 4.6, 13.4],
                [ 28.0, -0.2, -38.9, -29.7, -59.0]
            ],
            [
                [ -16.8, 38.5, 15.5, 26.6, 48.9],
                [ 14.5, 49.6, -24.8, 65.6, 61.7],
                [ 22.1, -64.7, -4.3, -51.0, 36.3],
                [ 31.0, -88.9, 47.1, -123.5, -3.8],
                [ -14.8, -39.8, 128.2, -110.3, 42.6],
                // 1st column on next row; torch is -7.2
                [ -7.1, 95.3, -21.3, -58.7, -13.9],
                [ 26.9, 21.3, 16.1, 70.3, 32.1]
            ]
        ]
    );

    #[rustfmt::skip]
    assert_eq!(
        test_utils::to_vec1_round(&grad_w.flatten_all()?, 1)?,
        [
            // 2nd value; torch gets -3.2, 3rd value; torch gets 221.8
            -2.460e+01, -3.100e+00, 2.219e+02, 7.400e+00, 5.620e+01, 7.420e+01, 7.830e+01,
            8.900e+00, 1.050e+01, 2.810e+01, 5.100e+00, -1.046e+02, -1.572e+02, 8.710e+01,
            -9.840e+01, -4.230e+01, -1.898e+02, 1.860e+01, -3.570e+01, 9.810e+01, 4.680e+01,
            1.182e+02, 4.020e+01, -1.900e+00, 1.508e+02, 1.094e+02, 1.018e+02, -4.620e+01,
            1.591e+02, -2.320e+01,
            // 5th value; torch gets 7.1
            -8.450e+01, -4.600e+00, 6.330e+01, 1.123e+02, -7.000e+00, 1.101e+02, -6.620e+01,
            2.090e+01, -5.120e+01, 8.990e+01, 9.050e+01, -6.990e+01, 6.800e+01, -9.250e+01,
            1.380e+02, 4.720e+01, 4.710e+01, 6.210e+01, 8.870e+01, 2.098e+02, 3.870e+01,
            -1.390e+01, 6.270e+01, 1.484e+02, -9.920e+01, -4.200e+01, -1.505e+02, -1.480e+01,
            -2.620e+01, 8.220e+01, -3.350e+01, -2.260e+01, -1.198e+02, -5.080e+01, 1.259e+02,
            5.600e+01, 9.270e+01, 1.209e+02, 6.590e+01, -8.330e+01, 7.000e+00, -2.600e+01,
            -1.133e+02, 3.870e+01, 4.020e+01, -6.300e+00, -8.710e+01, -5.150e+01, -8.510e+01,
            2.000e-01, 3.640e+01, -6.100e+00, 6.590e+01, -2.700e+00, 6.550e+01,
            // 4th value; torch gets 3.8
            5.300e+00, -6.760e+01, -4.270e+01, -3.900e+00, 2.880e+01, 5.260e+01, 6.170e+01,
            -1.203e+02, -1.610e+01, 7.740e+01, -1.008e+02, -1.070e+01, -9.900e+00, 3.300e+00,
            -2.620e+01, -4.440e+01, 2.580e+01, -6.920e+01, -4.220e+01, 1.108e+02, 1.240e+01,
            -3.440e+01, -2.800e+00, 7.880e+01, -6.690e+01, 1.480e+01, 2.310e+01, -4.260e+01,
            -1.500e+00, -4.760e+01, 5.350e+01, -2.260e+01, 8.000e-01, -3.840e+01, -2.500e+00
        ]
    );
    Ok(())
}

// Instantiate each test for the cpu/cuda/metal backends.
test_device!(conv1d, conv1d_cpu, conv1d_gpu, conv1d_metal);
test_device!(
    conv1d_small,
    conv1d_small_cpu,
    conv1d_small_gpu,
    conv1d_small_metal
);
test_device!(conv2d, conv2d_cpu, conv2d_gpu, conv2d_metal);
// Instantiate the remaining tests for the cpu/cuda/metal backends. The metal
// test name for conv2d_grad previously had a typo (`conv2_grad_metal`); it is
// now consistent with the `<fn>_metal` naming used by every other entry.
test_device!(
    conv2d_non_square,
    conv2d_non_square_cpu,
    conv2d_non_square_gpu,
    conv2d_non_square_metal
);
test_device!(
    conv2d_small,
    conv2d_small_cpu,
    conv2d_small_gpu,
    conv2d_small_metal
);
test_device!(
    conv2d_smaller,
    conv2d_smaller_cpu,
    conv2d_smaller_gpu,
    conv2d_smaller_metal
);
test_device!(
    conv2d_grad,
    conv2d_grad_cpu,
    conv2d_grad_gpu,
    conv2d_grad_metal
);
candle/candle-core/tests/conv_tests.rs/0
{ "file_path": "candle/candle-core/tests/conv_tests.rs", "repo_id": "candle", "token_count": 23453 }
28
#![allow(unused)]
use anyhow::{Context, Result};
use std::io::Write;
use std::path::PathBuf;

/// One directory of CUDA kernel sources together with the path of the Rust
/// file that should receive the generated PTX bindings.
struct KernelDirectories {
    kernel_glob: &'static str,
    rust_target: &'static str,
    include_dirs: &'static [&'static str],
}

/// Every kernel directory handled by this build script.
const KERNEL_DIRS: [KernelDirectories; 1] = [KernelDirectories {
    kernel_glob: "examples/custom-ops/kernels/*.cu",
    rust_target: "examples/custom-ops/cuda_kernels.rs",
    include_dirs: &[],
}];

fn main() -> Result<()> {
    // Re-run the build script whenever it changes itself.
    println!("cargo:rerun-if-changed=build.rs");
    // Kernel compilation only happens when the cuda feature is enabled;
    // otherwise this build script is a no-op beyond the rerun directive.
    #[cfg(feature = "cuda")]
    {
        for dir in &KERNEL_DIRS {
            let builder = bindgen_cuda::Builder::default().kernel_paths_glob(dir.kernel_glob);
            println!("cargo:info={builder:?}");
            // Compile the .cu sources to PTX and emit the Rust bindings.
            let ptx = builder.build_ptx().unwrap();
            ptx.write(dir.rust_target).unwrap()
        }
    }
    Ok(())
}
candle/candle-examples/build.rs/0
{ "file_path": "candle/candle-examples/build.rs", "repo_id": "candle", "token_count": 391 }
29
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Error as E; use clap::Parser; use candle::{DType, Device, Tensor}; use candle_nn::{ops::softmax, VarBuilder}; use candle_transformers::models::clip; use tokenizers::Tokenizer; #[derive(Parser)] struct Args { #[arg(long)] model: Option<String>, #[arg(long)] tokenizer: Option<String>, #[arg(long, use_value_delimiter = true)] images: Option<Vec<String>>, #[arg(long)] cpu: bool, #[arg(long, use_value_delimiter = true)] sequences: Option<Vec<String>>, } fn load_image<T: AsRef<std::path::Path>>(path: T, image_size: usize) -> anyhow::Result<Tensor> { let img = image::ImageReader::open(path)?.decode()?; let (height, width) = (image_size, image_size); let img = img.resize_to_fill( width as u32, height as u32, image::imageops::FilterType::Triangle, ); let img = img.to_rgb8(); let img = img.into_raw(); let img = Tensor::from_vec(img, (height, width, 3), &Device::Cpu)? .permute((2, 0, 1))? .to_dtype(DType::F32)? .affine(2. / 255., -1.)?; Ok(img) } fn load_images<T: AsRef<std::path::Path>>( paths: &Vec<T>, image_size: usize, ) -> anyhow::Result<Tensor> { let mut images = vec![]; for path in paths { let tensor = load_image(path, image_size)?; images.push(tensor); } let images = Tensor::stack(&images, 0)?; Ok(images) } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let model_file = match args.model { None => { let api = hf_hub::api::sync::Api::new()?; let api = api.repo(hf_hub::Repo::with_revision( "openai/clip-vit-base-patch32".to_string(), hf_hub::RepoType::Model, "refs/pr/15".to_string(), )); api.get("model.safetensors")? 
} Some(model) => model.into(), }; let tokenizer = get_tokenizer(args.tokenizer)?; let config = clip::ClipConfig::vit_base_patch32(); let device = candle_examples::device(args.cpu)?; let vec_imgs = match args.images { Some(imgs) => imgs, None => vec![ "candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg".to_string(), "candle-examples/examples/yolo-v8/assets/bike.jpg".to_string(), ], }; let images = load_images(&vec_imgs, config.image_size)?.to_device(&device)?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(std::slice::from_ref(&model_file), DType::F32, &device)? }; let model = clip::ClipModel::new(vb, &config)?; let (input_ids, vec_seq) = tokenize_sequences(args.sequences, &tokenizer, &device)?; let (_logits_per_text, logits_per_image) = model.forward(&images, &input_ids)?; let softmax_image = softmax(&logits_per_image, 1)?; let softmax_image_vec = softmax_image.flatten_all()?.to_vec1::<f32>()?; println!("softmax_image_vec: {softmax_image_vec:?}"); let probability_vec = softmax_image_vec .iter() .map(|v| v * 100.0) .collect::<Vec<f32>>(); let probability_per_image = probability_vec.len() / vec_imgs.len(); for (i, img) in vec_imgs.iter().enumerate() { let start = i * probability_per_image; let end = start + probability_per_image; let prob = &probability_vec[start..end]; println!("\n\nResults for image: {img}\n"); for (i, p) in prob.iter().enumerate() { println!("Probability: {:.4}% Text: {} ", p, vec_seq[i]); } } Ok(()) } pub fn get_tokenizer(tokenizer: Option<String>) -> anyhow::Result<Tokenizer> { let tokenizer = match tokenizer { None => { let api = hf_hub::api::sync::Api::new()?; let api = api.repo(hf_hub::Repo::with_revision( "openai/clip-vit-base-patch32".to_string(), hf_hub::RepoType::Model, "refs/pr/15".to_string(), )); api.get("tokenizer.json")? 
} Some(file) => file.into(), }; Tokenizer::from_file(tokenizer).map_err(E::msg) } pub fn tokenize_sequences( sequences: Option<Vec<String>>, tokenizer: &Tokenizer, device: &Device, ) -> anyhow::Result<(Tensor, Vec<String>)> { let pad_id = *tokenizer .get_vocab(true) .get("<|endoftext|>") .ok_or(E::msg("No pad token"))?; let vec_seq = match sequences { Some(seq) => seq, None => vec![ "a cycling race".to_string(), "a photo of two cats".to_string(), "a robot holding a candle".to_string(), ], }; let mut tokens = vec![]; for seq in vec_seq.clone() { let encoding = tokenizer.encode(seq, true).map_err(E::msg)?; tokens.push(encoding.get_ids().to_vec()); } let max_len = tokens.iter().map(|v| v.len()).max().unwrap_or(0); // Pad the sequences to have the same length for token_vec in tokens.iter_mut() { let len_diff = max_len - token_vec.len(); if len_diff > 0 { token_vec.extend(vec![pad_id; len_diff]); } } let input_ids = Tensor::new(tokens, device)?; Ok((input_ids, vec_seq)) }
candle/candle-examples/examples/clip/main.rs/0
{ "file_path": "candle/candle-examples/examples/clip/main.rs", "repo_id": "candle", "token_count": 2490 }
30
// This example illustrates how to implement custom operations. These operations can provide their // own forward pass (CPU and GPU versions) as well as their backward pass. // // In this example we add the RMS normalization operation and implement it for f32. #[cfg(feature = "mkl")] extern crate intel_mkl_src; #[rustfmt::skip] #[cfg(feature = "cuda")] mod cuda_kernels; use clap::Parser; use candle::{CpuStorage, CustomOp1, Layout, Result, Shape, Tensor}; #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, } struct LayerNorm { eps: f32, } impl CustomOp1 for LayerNorm { fn name(&self) -> &'static str { "layer-norm" } fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)> { let (dim1, dim2) = layout.shape().dims2()?; let slice = storage.as_slice::<f32>()?; let src = match layout.contiguous_offsets() { None => candle::bail!("input has to be contiguous"), Some((o1, o2)) => &slice[o1..o2], }; let mut dst = Vec::with_capacity(dim1 * dim2); for idx1 in 0..dim1 { let src = &src[idx1 * dim2..(idx1 + 1) * dim2]; let variance = src.iter().map(|x| x * x).sum::<f32>(); let s_variance = 1f32 / (variance / dim2 as f32 + self.eps).sqrt(); dst.extend(src.iter().map(|x| x * s_variance)) } let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, layout.shape().clone())) } #[cfg(feature = "cuda")] fn cuda_fwd( &self, storage: &candle::CudaStorage, layout: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::backend::BackendStorage; use candle::cuda_backend::cudarc::driver::{LaunchConfig, PushKernelArg}; use candle::cuda_backend::WrapErr; let (d1, d2) = layout.shape().dims2()?; let d1 = d1 as u32; let d2 = d2 as u32; let dev = storage.device().clone(); let slice = storage.as_cuda_slice::<f32>()?; let slice = match layout.contiguous_offsets() { None => candle::bail!("input has to be contiguous"), Some((o1, o2)) => 
slice.slice(o1..o2), }; let elem_count = layout.shape().elem_count(); let dst = unsafe { dev.alloc::<f32>(elem_count) }?; let func = dev.get_or_load_custom_func("rms_f32", "mymodule", cuda_kernels::LAYERNORM_KERNELS)?; let cfg = LaunchConfig { grid_dim: (d1, 1, 1), block_dim: (d2, 1, 1), shared_mem_bytes: 0, }; let mut builder = func.builder(); builder.arg(&dst); builder.arg(&slice); candle::builder_arg!(builder, self.eps, d1, d2); unsafe { builder.launch(cfg) }.w()?; let dst = candle::CudaStorage::wrap_cuda_slice(dst, dev); Ok((dst, layout.shape().clone())) } } fn main() -> anyhow::Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let t = Tensor::arange(0f32, 14f32, &device)?.reshape((2, 7))?; println!("{t}"); let t = t.apply_op1(LayerNorm { eps: 1e-5 })?; println!("{t}"); Ok(()) }
candle/candle-examples/examples/custom-ops/main.rs/0
{ "file_path": "candle/candle-examples/examples/custom-ops/main.rs", "repo_id": "candle", "token_count": 1534 }
31
# candle-efficientvit [EfficientViT: Memory Efficient Vision Transformer with Cascaded Group Attention](https://arxiv.org/abs/2305.07027). This candle implementation uses a pre-trained EfficientViT (from Microsoft Research Asia) network for inference. The classification head has been trained on the ImageNet dataset and returns the probabilities for the top-5 classes. ## Running an example ``` $ cargo run --example efficientvit --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg --which m1 loaded image Tensor[dims 3, 224, 224; f32] model built mountain bike, all-terrain bike, off-roader: 69.80% unicycle, monocycle : 13.03% bicycle-built-for-two, tandem bicycle, tandem: 9.28% crash helmet : 2.25% alp : 0.46% ```
candle/candle-examples/examples/efficientvit/README.md/0
{ "file_path": "candle/candle-examples/examples/efficientvit/README.md", "repo_id": "candle", "token_count": 273 }
32
# candle-gemma: 2b and 7b LLMs from Google DeepMind [Gemma](https://ai.google.dev/gemma/docs) is a collection of lightweight open models published by Google Deepmind with a 2b and a 7b variant for the first version, and a 2b and a 9b variant for v2. ## Running the example ```bash $ cargo run --example gemma --features cuda -r -- \ --prompt "Here is a proof that square root of 2 is not rational: " Here is a proof that square root of 2 is not rational: Let us assume it to be rational. Then, we can write √2 = p/q where q ≠ 0 and p and q are integers with no common factors other than 1. Squaring both sides gives us (p/q)^2 = 2 or p^2/q^2 = 2. This implies that p^2 is divisible by 2, which means that p must be even. Let us write p = 2m where m is an integer. Substituting this in the above equation we get: (p^2)/q^2 = 2 or (4m^2)/q^2 = 2 or q^2/2m^2 = 1 which implies that q^2 must be divisible by 2, and hence q is even. This contradicts our assumption that p and q have no common factors other than 1. Hence we conclude that √2 cannot be rational. ``` ## Access restrictions In order to use the v1 examples, you have to accept the license on the [HuggingFace Hub Gemma repo](https://huggingface.co/google/gemma-7b) and set up your access token via the [HuggingFace cli login command](https://huggingface.co/docs/huggingface_hub/guides/cli#huggingface-cli-login).
candle/candle-examples/examples/gemma/README.md/0
{ "file_path": "candle/candle-examples/examples/gemma/README.md", "repo_id": "candle", "token_count": 441 }
33
// https://github.com/karpathy/llama2.c #[cfg(feature = "accelerate")] extern crate accelerate_src; #[cfg(feature = "mkl")] extern crate intel_mkl_src; use candle_transformers::models::llama2_c as model; use candle_transformers::models::llama2_c_weights as weights; use candle_transformers::models::quantized_llama2_c as qmodel; mod training; use clap::{Parser, Subcommand}; use anyhow::{Error as E, Result}; use byteorder::{LittleEndian, ReadBytesExt}; use candle::{IndexOp, Tensor}; use candle_transformers::generation::LogitsProcessor; use std::io::Write; use tokenizers::Tokenizer; use model::{Cache, Config, Llama}; use qmodel::QLlama; use weights::TransformerWeights; #[derive(Parser, Debug, Clone)] struct InferenceCmd { /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, #[arg(long, default_value = "")] prompt: String, /// Config file in binary or safetensors format. #[arg(long)] config: Option<String>, #[arg(long, default_value = "karpathy/tinyllamas")] model_id: String, /// The model to be used when getting it from the hub. Possible /// values are 'stories15M.bin', 'stories42M.bin', see more at: /// https://huggingface.co/karpathy/tinyllamas/tree/main #[arg(long, default_value = "stories15M.bin")] which_model: String, } #[derive(Parser, Debug, Clone)] struct EvaluationCmd { /// A directory with the pre-tokenized dataset in the format generated by the tinystories.py /// script from llama2.c https://github.com/karpathy/llama2.c #[arg(long)] pretokenized_dir: Option<String>, #[arg(long, default_value_t = 32)] batch_size: usize, /// Config file in binary format. #[arg(long)] config: Option<String>, #[arg(long, default_value = "karpathy/tinyllamas")] model_id: String, /// The model to be used when getting it from the hub. 
Possible /// values are 'stories15M.bin', 'stories42M.bin', see more at: /// https://huggingface.co/karpathy/tinyllamas/tree/main #[arg(long, default_value = "stories15M.bin")] which_model: String, } #[derive(Parser, Debug, Clone)] pub struct TrainingCmd { /// A directory with the pre-tokenized dataset in the format generated by the tinystories.py /// script from llama2.c https://github.com/karpathy/llama2.c #[arg(long)] pretokenized_dir: String, #[arg(long, default_value_t = 32)] batch_size: usize, #[arg(long, default_value_t = 0.001)] learning_rate: f64, } #[derive(Subcommand, Debug, Clone)] enum Task { Inference(InferenceCmd), Eval(EvaluationCmd), Train(TrainingCmd), } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] pub struct Args { /// The task to be performed, inference, training or evaluation. #[command(subcommand)] task: Option<Task>, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Tokenizer config file. #[arg(long)] tokenizer: Option<String>, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. #[arg(long, default_value_t = 64)] repeat_last_n: usize, } impl Args { fn tokenizer(&self) -> Result<Tokenizer> { let tokenizer_path = match &self.tokenizer { Some(config) => std::path::PathBuf::from(config), None => { let api = hf_hub::api::sync::Api::new()?; let api = api.model("hf-internal-testing/llama-tokenizer".to_string()); api.get("tokenizer.json")? } }; Tokenizer::from_file(tokenizer_path).map_err(E::msg) } } fn main() -> anyhow::Result<()> { let args = Args::parse(); match &args.task { None => { let cmd = InferenceCmd { temperature: None, top_p: None, prompt: "".to_string(), config: None, model_id: "karpathy/tinyllamas".to_string(), which_model: "stories15M.bin".to_string(), }; run_inference(&cmd, &args)? 
} Some(Task::Inference(cmd)) => run_inference(cmd, &args)?, Some(Task::Eval(cmd)) => run_eval(cmd, &args)?, Some(Task::Train(cmd)) => training::run(cmd, &args)?, } Ok(()) } enum Model { Llama(Llama), QLlama(QLlama), } impl Model { fn forward(&self, xs: &Tensor, pos: usize, cache: &mut Cache) -> anyhow::Result<Tensor> { match self { Self::Llama(l) => Ok(l.forward(xs, pos, cache)?), Self::QLlama(l) => Ok(l.forward(xs, pos, cache)?), } } } fn run_eval(args: &EvaluationCmd, common_args: &Args) -> Result<()> { use std::io::BufRead; let config_path = match &args.config { Some(config) => std::path::PathBuf::from(config), None => { let api = hf_hub::api::sync::Api::new()?; println!("loading the model weights from {}", args.model_id); let api = api.model(args.model_id.clone()); api.get(&args.which_model)? } }; let tokenizer = common_args.tokenizer()?; let device = candle_examples::device(common_args.cpu)?; let mut file = std::fs::File::open(config_path)?; let config = Config::from_reader(&mut file)?; let weights = TransformerWeights::from_reader(&mut file, &config, &device)?; let vb = weights.var_builder(&config, &device)?; let mut cache = Cache::new(false, &config, vb.pp("rot"))?; let model = Llama::load(vb, config)?; let tokens = match &args.pretokenized_dir { None => { let api = hf_hub::api::sync::Api::new()?; let model_id = "roneneldan/TinyStories"; // TODO: Make this configurable. 
println!("loading the evaluation dataset from {}", model_id); let api = api.dataset(model_id.to_string()); let dataset_path = api.get("TinyStories-valid.txt")?; let file = std::fs::File::open(dataset_path)?; let file = std::io::BufReader::new(file); let mut tokens = vec![]; for line in file.lines() { let line = line?.replace("<|endoftext|>", "<s>"); let line = tokenizer.encode(line, false).map_err(E::msg)?; tokens.push(line.get_ids().to_vec()) } tokens.concat() } Some(pretokenized_dir) => { // Use shard 0 for the test split, similar to llama2.c // https://github.com/karpathy/llama2.c/blob/ce05cc28cf1e3560b873bb21837638a434520a67/tinystories.py#L121 let path = std::path::PathBuf::from(pretokenized_dir).join("data00.bin"); let bytes = std::fs::read(path)?; // Tokens are encoded as u16. let mut tokens = vec![0u16; bytes.len() / 2]; std::io::Cursor::new(bytes).read_u16_into::<LittleEndian>(&mut tokens)?; tokens.into_iter().map(|u| u as u32).collect::<Vec<u32>>() } }; println!("dataset loaded and encoded: {} tokens", tokens.len()); let seq_len = model.config.seq_len; let iter = (0..tokens.len()).step_by(seq_len).flat_map(|start_idx| { if start_idx + seq_len + 1 > tokens.len() { None } else { let tokens = &tokens[start_idx..start_idx + seq_len + 1]; let inputs = Tensor::new(&tokens[..seq_len], &device); let targets = Tensor::new(&tokens[1..], &device); Some(inputs.and_then(|inputs| targets.map(|targets| (inputs, targets)))) } }); let batch_iter = candle_datasets::Batcher::new_r2(iter).batch_size(args.batch_size); for inp_tgt in batch_iter { let (inp, tgt) = inp_tgt?; let logits = model.forward(&inp, 0, &mut cache)?; let loss = candle_nn::loss::cross_entropy(&logits.flatten_to(1)?, &tgt.flatten_to(1)?)?; println!("{}", loss.to_vec0::<f32>()?); } Ok(()) } fn run_inference(args: &InferenceCmd, common_args: &Args) -> Result<()> { let config_path = match &args.config { Some(config) => std::path::PathBuf::from(config), None => { let api = hf_hub::api::sync::Api::new()?; 
println!("loading the model weights from {}", args.model_id); let api = api.model(args.model_id.clone()); api.get(&args.which_model)? } }; let tokenizer = common_args.tokenizer()?; let device = candle_examples::device(common_args.cpu)?; #[cfg(feature = "cuda")] if let candle::Device::Cuda(d) = &device { unsafe { d.disable_event_tracking(); } }; let is_gguf = config_path.extension().map_or(false, |v| v == "gguf"); let is_safetensors = config_path .extension() .map_or(false, |v| v == "safetensors"); let (model, config, mut cache) = if is_gguf { let vb = qmodel::VarBuilder::from_gguf(config_path, &device)?; let (_vocab_size, dim) = vb .get_no_shape("model.embed_tokens.weight")? .shape() .dims2()?; let config = match dim { 64 => Config::tiny_260k(), 288 => Config::tiny_15m(), 512 => Config::tiny_42m(), 768 => Config::tiny_110m(), _ => anyhow::bail!("no config for dim {dim}"), }; let freq_cis_real = vb .get( (config.seq_len, config.head_size() / 2), "rot.freq_cis_real", )? .dequantize(&device)?; let freq_cis_imag = vb .get( (config.seq_len, config.head_size() / 2), "rot.freq_cis_imag", )? 
.dequantize(&device)?; let fake_vb = candle_nn::VarBuilder::from_tensors( [ ("freq_cis_real".to_string(), freq_cis_real), ("freq_cis_imag".to_string(), freq_cis_imag), ] .into_iter() .collect(), candle::DType::F32, &device, ); let cache = model::Cache::new(true, &config, fake_vb)?; let model = Model::QLlama(QLlama::load(vb, config.clone())?); (model, config, cache) } else if is_safetensors { let config = Config::tiny_15m(); let tensors = candle::safetensors::load(config_path, &device)?; let vb = candle_nn::VarBuilder::from_tensors(tensors, candle::DType::F32, &device); let cache = model::Cache::new(true, &config, vb.pp("rot"))?; let model = Model::Llama(Llama::load(vb, config.clone())?); (model, config, cache) } else { let mut file = std::fs::File::open(config_path)?; let config = Config::from_reader(&mut file)?; println!("{config:?}"); let weights = TransformerWeights::from_reader(&mut file, &config, &device)?; let vb = weights.var_builder(&config, &device)?; let cache = model::Cache::new(true, &config, vb.pp("rot"))?; let model = Model::Llama(Llama::load(vb, config.clone())?); (model, config, cache) }; println!("starting the inference loop"); let mut logits_processor = LogitsProcessor::new(299792458, args.temperature, args.top_p); let mut index_pos = 0; print!("{}", args.prompt); let mut tokens = tokenizer .encode(args.prompt.clone(), true) .map_err(E::msg)? .get_ids() .to_vec(); let mut tokenizer = candle_examples::token_output_stream::TokenOutputStream::new(tokenizer); let start_gen = std::time::Instant::now(); for index in 0.. { if tokens.len() >= config.seq_len { break; } let context_size = if index > 0 { 1 } else { tokens.len() }; let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &device)?.unsqueeze(0)?; let logits = model.forward(&input, index_pos, &mut cache)?; let logits = logits.i((0, logits.dim(1)? - 1))?; let logits = if common_args.repeat_penalty == 1. 
|| tokens.is_empty() { logits } else { let start_at = tokens.len().saturating_sub(common_args.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, common_args.repeat_penalty, &tokens[start_at..], )? }; index_pos += ctxt.len(); let next_token = logits_processor.sample(&logits)?; tokens.push(next_token); if let Some(t) = tokenizer.next_token(next_token)? { print!("{t}"); std::io::stdout().flush()?; } } if let Some(rest) = tokenizer.decode_rest().map_err(E::msg)? { print!("{rest}"); } let dt = start_gen.elapsed(); println!( "\n{} tokens generated ({:.2} token/s)\n", tokens.len(), tokens.len() as f64 / dt.as_secs_f64(), ); Ok(()) }
candle/candle-examples/examples/llama2-c/main.rs/0
{ "file_path": "candle/candle-examples/examples/llama2-c/main.rs", "repo_id": "candle", "token_count": 6082 }
34
from pathlib import Path import warnings from transformers import AutoTokenizer from transformers.convert_slow_tokenizer import SpmConverter, requires_backends, import_protobuf class MarianConverter(SpmConverter): def __init__(self, *args, index: int = 0): requires_backends(self, "protobuf") super(SpmConverter, self).__init__(*args) # from .utils import sentencepiece_model_pb2 as model_pb2 model_pb2 = import_protobuf() m = model_pb2.ModelProto() print(self.original_tokenizer.spm_files) with open(self.original_tokenizer.spm_files[index], "rb") as f: m.ParseFromString(f.read()) self.proto = m print(self.original_tokenizer) #with open(self.original_tokenizer.vocab_path, "r") as f: dir_path = Path(self.original_tokenizer.spm_files[0]).parents[0] with open(dir_path / "vocab.json", "r") as f: import json self._vocab = json.load(f) if self.proto.trainer_spec.byte_fallback: if not getattr(self, "handle_byte_fallback", None): warnings.warn( "The sentencepiece tokenizer that you are converting to a fast tokenizer uses the byte fallback option" " which is not implemented in the fast tokenizers. In practice this means that the fast version of the" " tokenizer can produce unknown tokens whereas the sentencepiece version would have converted these " "unknown tokens into a sequence of byte tokens matching the original piece of text." ) def vocab(self, proto): vocab_size = max(self._vocab.values()) + 1 vocab = [("<NIL>", -100) for _ in range(vocab_size)] for piece in proto.pieces: try: index = self._vocab[piece.piece] except Exception: print(f"Ignored missing piece {piece.piece}") vocab[index] = (piece.piece, piece.score) return vocab tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-fr-en", use_fast=False) fast_tokenizer = MarianConverter(tokenizer, index=0).converted() fast_tokenizer.save("tokenizer-marian-base-fr.json") fast_tokenizer = MarianConverter(tokenizer, index=1).converted() fast_tokenizer.save("tokenizer-marian-base-en.json")
candle/candle-examples/examples/marian-mt/python/convert_slow_tokenizer.py/0
{ "file_path": "candle/candle-examples/examples/marian-mt/python/convert_slow_tokenizer.py", "repo_id": "candle", "token_count": 990 }
35
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use clap::{Parser, ValueEnum}; use candle::{DType, IndexOp, D}; use candle_nn::{Module, VarBuilder}; use candle_transformers::models::mobilenetv4; #[derive(Clone, Copy, Debug, ValueEnum)] enum Which { Small, Medium, Large, HybridMedium, HybridLarge, } impl Which { fn model_filename(&self) -> String { let name = match self { Self::Small => "conv_small.e2400_r224", Self::Medium => "conv_medium.e500_r256", Self::HybridMedium => "hybrid_medium.ix_e550_r256", Self::Large => "conv_large.e600_r384", Self::HybridLarge => "hybrid_large.ix_e600_r384", }; format!("timm/mobilenetv4_{name}_in1k") } fn resolution(&self) -> u32 { match self { Self::Small => 224, Self::Medium => 256, Self::HybridMedium => 256, Self::Large => 384, Self::HybridLarge => 384, } } fn config(&self) -> mobilenetv4::Config { match self { Self::Small => mobilenetv4::Config::small(), Self::Medium => mobilenetv4::Config::medium(), Self::HybridMedium => mobilenetv4::Config::hybrid_medium(), Self::Large => mobilenetv4::Config::large(), Self::HybridLarge => mobilenetv4::Config::hybrid_large(), } } } #[derive(Parser)] struct Args { #[arg(long)] model: Option<String>, #[arg(long)] image: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, #[arg(value_enum, long, default_value_t=Which::Small)] which: Which, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let image = candle_examples::imagenet::load_image(args.image, args.which.resolution() as usize)? .to_device(&device)?; println!("loaded image {image:?}"); let model_file = match args.model { None => { let model_name = args.which.model_filename(); let api = hf_hub::api::sync::Api::new()?; let api = api.model(model_name); api.get("model.safetensors")? } Some(model) => model.into(), }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? 
}; let model = mobilenetv4::mobilenetv4(&args.which.config(), 1000, vb)?; println!("model built"); let logits = model.forward(&image.unsqueeze(0)?)?; let prs = candle_nn::ops::softmax(&logits, D::Minus1)? .i(0)? .to_vec1::<f32>()?; let mut prs = prs.iter().enumerate().collect::<Vec<_>>(); prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1)); for &(category_idx, pr) in prs.iter().take(5) { println!( "{:24}: {:.2}%", candle_examples::imagenet::CLASSES[category_idx], 100. * pr ); } Ok(()) }
candle/candle-examples/examples/mobilenetv4/main.rs/0
{ "file_path": "candle/candle-examples/examples/mobilenetv4/main.rs", "repo_id": "candle", "token_count": 1442 }
36
## Using ONNX models in Candle This example demonstrates how to run [ONNX](https://github.com/onnx/onnx) based models in Candle. It contains small variants of two models, [SqueezeNet](https://arxiv.org/pdf/1602.07360.pdf) (default) and [EfficientNet](https://arxiv.org/pdf/1905.11946.pdf). You can run the examples with following commands: ```bash cargo run --example onnx --features=onnx --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg ``` Use the `--which` flag to specify explicitly which network to use, i.e. ```bash $ cargo run --example onnx --features=onnx --release -- --which squeeze-net --image candle-examples/examples/yolo-v8/assets/bike.jpg Finished release [optimized] target(s) in 0.21s Running `target/release/examples/onnx --which squeeze-net --image candle-examples/examples/yolo-v8/assets/bike.jpg` loaded image Tensor[dims 3, 224, 224; f32] unicycle, monocycle : 83.23% ballplayer, baseball player : 3.68% bearskin, busby, shako : 1.54% military uniform : 0.78% cowboy hat, ten-gallon hat : 0.76% ``` ```bash $ cargo run --example onnx --features=onnx --release -- --which efficient-net --image candle-examples/examples/yolo-v8/assets/bike.jpg Finished release [optimized] target(s) in 0.20s Running `target/release/examples/onnx --which efficient-net --image candle-examples/examples/yolo-v8/assets/bike.jpg` loaded image Tensor[dims 224, 224, 3; f32] bicycle-built-for-two, tandem bicycle, tandem : 99.16% mountain bike, all-terrain bike, off-roader : 0.60% unicycle, monocycle : 0.17% crash helmet : 0.02% alp : 0.02% ```
candle/candle-examples/examples/onnx/README.md/0
{ "file_path": "candle/candle-examples/examples/onnx/README.md", "repo_id": "candle", "token_count": 832 }
37
# candle-quantized-phi Candle implementation of various quantized Phi models. ## Running an example ```bash $ cargo run --example quantized-phi --release -- --prompt "The best thing about coding in rust is " > - it's memory safe (without you having to worry too much) > - the borrow checker is really smart and will catch your mistakes for free, making them show up as compile errors instead of segfaulting in runtime. > > This alone make me prefer using rust over c++ or go, python/Cython etc. > > The major downside I can see now: > - it's slower than other languages (viz: C++) and most importantly lack of libraries to leverage existing work done by community in that language. There are so many useful machine learning libraries available for c++, go, python etc but none for Rust as far as I am aware of on the first glance. > - there aren't a lot of production ready projects which also makes it very hard to start new one (given my background) > > Another downside: ```
candle/candle-examples/examples/quantized-phi/README.md/0
{ "file_path": "candle/candle-examples/examples/quantized-phi/README.md", "repo_id": "candle", "token_count": 251 }
38
import gymnasium as gym import numpy as np from collections import deque from PIL import Image from multiprocessing import Process, Pipe # atari_wrappers.py class NoopResetEnv(gym.Wrapper): def __init__(self, env, noop_max=30): """Sample initial states by taking random number of no-ops on reset. No-op is assumed to be action 0. """ gym.Wrapper.__init__(self, env) self.noop_max = noop_max self.override_num_noops = None assert env.unwrapped.get_action_meanings()[0] == 'NOOP' def reset(self): """ Do no-op action for a number of steps in [1, noop_max].""" self.env.reset() if self.override_num_noops is not None: noops = self.override_num_noops else: noops = self.unwrapped.np_random.integers(1, self.noop_max + 1) #pylint: disable=E1101 assert noops > 0 obs = None for _ in range(noops): obs, _, done, _ = self.env.step(0) if done: obs = self.env.reset() return obs class FireResetEnv(gym.Wrapper): def __init__(self, env): """Take action on reset for environments that are fixed until firing.""" gym.Wrapper.__init__(self, env) assert env.unwrapped.get_action_meanings()[1] == 'FIRE' assert len(env.unwrapped.get_action_meanings()) >= 3 def reset(self): self.env.reset() obs, _, done, _ = self.env.step(1) if done: self.env.reset() obs, _, done, _ = self.env.step(2) if done: self.env.reset() return obs class ImageSaver(gym.Wrapper): def __init__(self, env, img_path, rank): gym.Wrapper.__init__(self, env) self._cnt = 0 self._img_path = img_path self._rank = rank def step(self, action): step_result = self.env.step(action) obs, _, _, _ = step_result img = Image.fromarray(obs, 'RGB') img.save('%s/out%d-%05d.png' % (self._img_path, self._rank, self._cnt)) self._cnt += 1 return step_result class EpisodicLifeEnv(gym.Wrapper): def __init__(self, env): """Make end-of-life == end-of-episode, but only reset on true game over. Done by DeepMind for the DQN and co. since it helps value estimation. 
""" gym.Wrapper.__init__(self, env) self.lives = 0 self.was_real_done = True def step(self, action): obs, reward, done, info = self.env.step(action) self.was_real_done = done # check current lives, make loss of life terminal, # then update lives to handle bonus lives lives = self.env.unwrapped.ale.lives() if lives < self.lives and lives > 0: # for Qbert sometimes we stay in lives == 0 condition for a few frames # so its important to keep lives > 0, so that we only reset once # the environment advertises done. done = True self.lives = lives return obs, reward, done, info def reset(self): """Reset only when lives are exhausted. This way all states are still reachable even though lives are episodic, and the learner need not know about any of this behind-the-scenes. """ if self.was_real_done: obs = self.env.reset() else: # no-op step to advance from terminal/lost life state obs, _, _, _ = self.env.step(0) self.lives = self.env.unwrapped.ale.lives() return obs class MaxAndSkipEnv(gym.Wrapper): def __init__(self, env, skip=4): """Return only every `skip`-th frame""" gym.Wrapper.__init__(self, env) # most recent raw observations (for max pooling across time steps) self._obs_buffer = deque(maxlen=2) self._skip = skip def step(self, action): """Repeat action, sum reward, and max over last observations.""" total_reward = 0.0 done = None for _ in range(self._skip): obs, reward, done, info = self.env.step(action) self._obs_buffer.append(obs) total_reward += reward if done: break max_frame = np.max(np.stack(self._obs_buffer), axis=0) return max_frame, total_reward, done, info def reset(self): """Clear past frame buffer and init. to first obs. 
from inner env.""" self._obs_buffer.clear() obs = self.env.reset() self._obs_buffer.append(obs) return obs class ClipRewardEnv(gym.RewardWrapper): def reward(self, reward): """Bin reward to {+1, 0, -1} by its sign.""" return np.sign(reward) class WarpFrame(gym.ObservationWrapper): def __init__(self, env): """Warp frames to 84x84 as done in the Nature paper and later work.""" gym.ObservationWrapper.__init__(self, env) self.res = 84 self.observation_space = gym.spaces.Box(low=0, high=255, shape=(self.res, self.res, 1), dtype='uint8') def observation(self, obs): frame = np.dot(obs.astype('float32'), np.array([0.299, 0.587, 0.114], 'float32')) frame = np.array(Image.fromarray(frame).resize((self.res, self.res), resample=Image.BILINEAR), dtype=np.uint8) return frame.reshape((self.res, self.res, 1)) class FrameStack(gym.Wrapper): def __init__(self, env, k): """Buffer observations and stack across channels (last axis).""" gym.Wrapper.__init__(self, env) self.k = k self.frames = deque([], maxlen=k) shp = env.observation_space.shape assert shp[2] == 1 # can only stack 1-channel frames self.observation_space = gym.spaces.Box(low=0, high=255, shape=(shp[0], shp[1], k), dtype='uint8') def reset(self): """Clear buffer and re-fill by duplicating the first observation.""" ob = self.env.reset() for _ in range(self.k): self.frames.append(ob) return self.observation() def step(self, action): ob, reward, done, info = self.env.step(action) self.frames.append(ob) return self.observation(), reward, done, info def observation(self): assert len(self.frames) == self.k return np.concatenate(self.frames, axis=2) def wrap_deepmind(env, episode_life=True, clip_rewards=True): """Configure environment for DeepMind-style Atari. 
Note: this does not include frame stacking!""" assert 'NoFrameskip' in env.spec.id # required for DeepMind-style skip if episode_life: env = EpisodicLifeEnv(env) env = NoopResetEnv(env, noop_max=30) env = MaxAndSkipEnv(env, skip=4) if 'FIRE' in env.unwrapped.get_action_meanings(): env = FireResetEnv(env) env = WarpFrame(env) if clip_rewards: env = ClipRewardEnv(env) return env # envs.py def make_env(env_id, img_dir, seed, rank): def _thunk(): env = gym.make(env_id) env.reset(seed=(seed + rank)) if img_dir is not None: env = ImageSaver(env, img_dir, rank) env = wrap_deepmind(env) env = WrapPyTorch(env) return env return _thunk class WrapPyTorch(gym.ObservationWrapper): def __init__(self, env=None): super(WrapPyTorch, self).__init__(env) self.observation_space = gym.spaces.Box(0.0, 1.0, [1, 84, 84], dtype='float32') def observation(self, observation): return observation.transpose(2, 0, 1) # vecenv.py class VecEnv(object): """ Vectorized environment base class """ def step(self, vac): """ Apply sequence of actions to sequence of environments actions -> (observations, rewards, news) where 'news' is a boolean vector indicating whether each element is new. 
""" raise NotImplementedError def reset(self): """ Reset all environments """ raise NotImplementedError def close(self): pass # subproc_vec_env.py def worker(remote, env_fn_wrapper): env = env_fn_wrapper.x() while True: cmd, data = remote.recv() if cmd == 'step': ob, reward, done, info = env.step(data) if done: ob = env.reset() remote.send((ob, reward, done, info)) elif cmd == 'reset': ob = env.reset() remote.send(ob) elif cmd == 'close': remote.close() break elif cmd == 'get_spaces': remote.send((env.action_space, env.observation_space)) else: raise NotImplementedError class CloudpickleWrapper(object): """ Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle) """ def __init__(self, x): self.x = x def __getstate__(self): import cloudpickle return cloudpickle.dumps(self.x) def __setstate__(self, ob): import pickle self.x = pickle.loads(ob) class SubprocVecEnv(VecEnv): def __init__(self, env_fns): """ envs: list of gym environments to run in subprocesses """ nenvs = len(env_fns) self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) self.ps = [Process(target=worker, args=(work_remote, CloudpickleWrapper(env_fn))) for (work_remote, env_fn) in zip(self.work_remotes, env_fns)] for p in self.ps: p.start() self.remotes[0].send(('get_spaces', None)) self.action_space, self.observation_space = self.remotes[0].recv() def step(self, actions): for remote, action in zip(self.remotes, actions): remote.send(('step', action)) results = [remote.recv() for remote in self.remotes] obs, rews, dones, infos = zip(*results) return np.stack(obs), np.stack(rews), np.stack(dones), infos def reset(self): for remote in self.remotes: remote.send(('reset', None)) return np.stack([remote.recv() for remote in self.remotes]) def close(self): for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() @property def num_envs(self): return len(self.remotes) # Create the environment. 
def make(env_name, img_dir, num_processes):
    """Create a SubprocVecEnv running `num_processes` copies of `env_name`.

    Each worker is built via `make_env` with a fixed base seed (1337) plus its
    rank, so the parallel environments are decorrelated but reproducible.
    """
    thunks = [make_env(env_name, img_dir, 1337, rank) for rank in range(num_processes)]
    return SubprocVecEnv(thunks)
candle/candle-examples/examples/reinforcement-learning/atari_wrappers.py/0
{ "file_path": "candle/candle-examples/examples/reinforcement-learning/atari_wrappers.py", "repo_id": "candle", "token_count": 4740 }
39
# candle-segformer - [HuggingFace Segformer Model Card][segformer] - [`mit-b0` - An encoder only pretrained model][encoder] - [`segformer-b0-finetuned-ade-512-512` - A fine tuned model for segmentation][ade512] ## How to run the example If you want you can use the example images from this [pull request][pr], download them and supply the path to the image as an argument to the example. ```bash # run the image classification task cargo run --example segformer classify candle-examples/examples/yolo-v8/assets/bike.jpg # run the segmentation task cargo run --example segformer segment candle-examples/examples/yolo-v8/assets/bike.jpg ``` Example output for classification: ```text classification logits [3.275261e-5, 0.0008562019, 0.0008868563, 0.9977506, 0.0002465068, 0.0002241473, 2.846596e-6] label: hamburger ``` [pr]: https://github.com/huggingface/candle/pull/1617 [segformer]: https://huggingface.co/docs/transformers/model_doc/segformer [encoder]: https://huggingface.co/nvidia/mit-b0 [ade512]: https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512
candle/candle-examples/examples/segformer/README.md/0
{ "file_path": "candle/candle-examples/examples/segformer/README.md", "repo_id": "candle", "token_count": 383 }
40
# candle-stable-diffusion-3: Candle Implementation of Stable Diffusion 3/3.5 ![](assets/stable-diffusion-3.jpg) *A cute rusty robot holding a candle torch in its hand, with glowing neon text \"LETS GO RUSTY\" displayed on its chest, bright background, high quality, 4k*, generated by Stable Diffusion 3 Medium Stable Diffusion 3 Medium is a text-to-image model based on Multimodal Diffusion Transformer (MMDiT) architecture. - [huggingface repo](https://huggingface.co/stabilityai/stable-diffusion-3-medium) - [research paper](https://arxiv.org/pdf/2403.03206) - [announcement blog post](https://stability.ai/news/stable-diffusion-3-medium) Stable Diffusion 3.5 is a family of text-to-image models with latest improvements: - [announcement blog post](https://stability.ai/news/introducing-stable-diffusion-3-5) It has three variants: - [Stable Diffusion 3.5 Large](https://huggingface.co/stabilityai/stable-diffusion-3.5-large) @ 8.1b params, with scaled and slightly modified MMDiT architecture. - [Stable Diffusion 3.5 Large Turbo](https://huggingface.co/stabilityai/stable-diffusion-3.5-large-turbo) distilled version that enables 4-step inference. - [Stable Diffusion 3.5 Medium](https://huggingface.co/stabilityai/stable-diffusion-3.5-medium) @ 2.5b params, with improved MMDiT-X architecture. ## Getting access to the weights The weights of Stable Diffusion 3/3.5 is released by Stability AI under the Stability Community License. You will need to accept the conditions and acquire a license by visiting the repos on HuggingFace Hub to gain access to the weights for your HuggingFace account. To allow your computer to gain access to the public-gated repos on HuggingFace, you might need to create a [HuggingFace User Access Tokens](https://huggingface.co/docs/hub/en/security-tokens) (recommended) and log in on your computer if you haven't done that before. 
A convenient way to do the login is to use [huggingface-cli](https://huggingface.co/docs/huggingface_hub/en/guides/cli):

```shell
huggingface-cli login
```
and you will be prompted to enter your token.

On the first run, the weights will be automatically downloaded from the Huggingface Hub. After the download, the weights will be [cached](https://huggingface.co/docs/datasets/en/cache) and remain accessible locally.

## Running the model

```shell
cargo run --example stable-diffusion-3 --release --features=cuda -- \
  --which 3-medium --height 1024 --width 1024 \
  --prompt 'A cute rusty robot holding a candle torch in its hand, with glowing neon text \"LETS GO RUSTY\" displayed on its chest, bright background, high quality, 4k'
```

To use different models, change the value of the `--which` option. (Possible values: `3-medium`, `3.5-large`, `3.5-large-turbo` and `3.5-medium`).

To display the other available options,

```shell
cargo run --example stable-diffusion-3 --release --features=cuda -- --help
```

If your GPU supports it, Flash-Attention is a strongly recommended feature, as it can greatly improve inference speed: MMDiT is a transformer model that depends heavily on attention. To utilize [candle-flash-attn](https://github.com/huggingface/candle/tree/main/candle-flash-attn) in the demo, you will need both `--features flash-attn` and `--use-flash-attn`.

```shell
cargo run --example stable-diffusion-3 --release --features=cuda,flash-attn -- --use-flash-attn ...
```

## Performance Benchmark

The benchmark below was done with Stable Diffusion 3 Medium by generating a 1024-by-1024 image from 28 steps of Euler sampling and measuring the average speed (iterations per second).

[candle](https://github.com/huggingface/candle) and [candle-flash-attn](https://github.com/huggingface/candle/tree/main/candle-flash-attn) are based on the commit of [0d96ec3](https://github.com/huggingface/candle/commit/0d96ec31e8be03f844ed0aed636d6217dee9c7bc).
System specs (Desktop PCIE 5 x8/x8 dual-GPU setup): - Operating System: Ubuntu 23.10 - CPU: i9 12900K w/o overclocking. - RAM: 64G dual-channel DDR5 @ 4800 MT/s | Speed (iter/s) | w/o flash-attn | w/ flash-attn | | -------------- | -------------- | ------------- | | RTX 3090 Ti | 0.83 | 2.15 | | RTX 4090 | 1.72 | 4.06 |
candle/candle-examples/examples/stable-diffusion-3/README.md/0
{ "file_path": "candle/candle-examples/examples/stable-diffusion-3/README.md", "repo_id": "candle", "token_count": 1343 }
41
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use std::io::Write; use std::path::PathBuf; use candle_transformers::models::t5; use anyhow::{Error as E, Result}; use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use clap::{Parser, ValueEnum}; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; const DTYPE: DType = DType::F32; #[derive(Clone, Debug, Copy, ValueEnum)] enum Which { T5Base, T5Small, T5Large, T5_3B, Mt5Base, Mt5Small, Mt5Large, } #[derive(Parser, Debug, Clone)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// The model repository to use on the HuggingFace hub. #[arg(long)] model_id: Option<String>, #[arg(long)] revision: Option<String>, #[arg(long)] model_file: Option<String>, #[arg(long)] tokenizer_file: Option<String>, #[arg(long)] config_file: Option<String>, /// Enable decoding. #[arg(long)] decode: bool, // Enable/disable decoding. #[arg(long, default_value = "false")] disable_cache: bool, /// Use this prompt, otherwise compute sentence similarities. #[arg(long)] prompt: Option<String>, /// If set along with --decode, will use this prompt to initialize the decoder. #[arg(long)] decoder_prompt: Option<String>, /// L2 normalization for embeddings. #[arg(long, default_value = "true")] normalize_embeddings: bool, /// The temperature used to generate samples. #[arg(long, default_value_t = 0.8)] temperature: f64, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. 
#[arg(long, default_value_t = 64)] repeat_last_n: usize, /// The model to be used. #[arg(long, default_value = "t5-small")] which: Which, } struct T5ModelBuilder { device: Device, config: t5::Config, weights_filename: Vec<PathBuf>, } impl T5ModelBuilder { pub fn load(args: &Args) -> Result<(Self, Tokenizer)> { let device = candle_examples::device(args.cpu)?; let (default_model, default_revision) = match args.which { Which::T5Base => ("t5-base", "main"), Which::T5Small => ("t5-small", "refs/pr/15"), Which::T5Large => ("t5-large", "main"), Which::T5_3B => ("t5-3b", "main"), Which::Mt5Base => ("google/mt5-base", "refs/pr/5"), Which::Mt5Small => ("google/mt5-small", "refs/pr/6"), Which::Mt5Large => ("google/mt5-large", "refs/pr/2"), }; let default_model = default_model.to_string(); let default_revision = default_revision.to_string(); let (model_id, revision) = match (args.model_id.to_owned(), args.revision.to_owned()) { (Some(model_id), Some(revision)) => (model_id, revision), (Some(model_id), None) => (model_id, "main".to_string()), (None, Some(revision)) => (default_model, revision), (None, None) => (default_model, default_revision), }; let repo = Repo::with_revision(model_id.clone(), RepoType::Model, revision); let api = Api::new()?; let repo = api.repo(repo); let config_filename = match &args.config_file { None => repo.get("config.json")?, Some(f) => f.into(), }; let tokenizer_filename = match &args.tokenizer_file { None => match args.which { Which::Mt5Base => api .model("lmz/mt5-tokenizers".into()) .get("mt5-base.tokenizer.json")?, Which::Mt5Small => api .model("lmz/mt5-tokenizers".into()) .get("mt5-small.tokenizer.json")?, Which::Mt5Large => api .model("lmz/mt5-tokenizers".into()) .get("mt5-large.tokenizer.json")?, _ => repo.get("tokenizer.json")?, }, Some(f) => f.into(), }; let weights_filename = match &args.model_file { Some(f) => f.split(',').map(|v| v.into()).collect::<Vec<_>>(), None => { if model_id == "google/flan-t5-xxl" || model_id == "google/flan-ul2" { 
candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")? } else { vec![repo.get("model.safetensors")?] } } }; let config = std::fs::read_to_string(config_filename)?; let mut config: t5::Config = serde_json::from_str(&config)?; config.use_cache = !args.disable_cache; let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; Ok(( Self { device, config, weights_filename, }, tokenizer, )) } pub fn build_encoder(&self) -> Result<t5::T5EncoderModel> { let vb = unsafe { VarBuilder::from_mmaped_safetensors(&self.weights_filename, DTYPE, &self.device)? }; Ok(t5::T5EncoderModel::load(vb, &self.config)?) } pub fn build_conditional_generation(&self) -> Result<t5::T5ForConditionalGeneration> { let vb = unsafe { VarBuilder::from_mmaped_safetensors(&self.weights_filename, DTYPE, &self.device)? }; Ok(t5::T5ForConditionalGeneration::load(vb, &self.config)?) } } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; let (builder, mut tokenizer) = T5ModelBuilder::load(&args)?; let device = &builder.device; let tokenizer = tokenizer .with_padding(None) .with_truncation(None) .map_err(E::msg)?; match args.prompt { Some(prompt) => { let tokens = tokenizer .encode(prompt, true) .map_err(E::msg)? 
.get_ids() .to_vec(); let input_token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; if !args.decode { let mut model = builder.build_encoder()?; let start = std::time::Instant::now(); let ys = model.forward(&input_token_ids)?; println!("{ys}"); println!("Took {:?}", start.elapsed()); } else { let mut model = builder.build_conditional_generation()?; let mut output_token_ids = [builder .config .decoder_start_token_id .unwrap_or(builder.config.pad_token_id) as u32] .to_vec(); if let Some(decoder_prompt) = &args.decoder_prompt { print!("{decoder_prompt}"); output_token_ids.extend( tokenizer .encode(decoder_prompt.to_string(), false) .map_err(E::msg)? .get_ids() .to_vec(), ); } let temperature = if args.temperature <= 0. { None } else { Some(args.temperature) }; let mut logits_processor = LogitsProcessor::new(299792458, temperature, args.top_p); let encoder_output = model.encode(&input_token_ids)?; let start = std::time::Instant::now(); for index in 0.. { if output_token_ids.len() > 512 { break; } let decoder_token_ids = if index == 0 || !builder.config.use_cache { Tensor::new(output_token_ids.as_slice(), device)?.unsqueeze(0)? } else { let last_token = *output_token_ids.last().unwrap(); Tensor::new(&[last_token], device)?.unsqueeze(0)? }; let logits = model .decode(&decoder_token_ids, &encoder_output)? .squeeze(0)?; let logits = if args.repeat_penalty == 1. { logits } else { let start_at = output_token_ids.len().saturating_sub(args.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, args.repeat_penalty, &output_token_ids[start_at..], )? 
}; let next_token_id = logits_processor.sample(&logits)?; if next_token_id as usize == builder.config.eos_token_id { break; } output_token_ids.push(next_token_id); if let Some(text) = tokenizer.id_to_token(next_token_id) { let text = text.replace('▁', " ").replace("<0x0A>", "\n"); print!("{text}"); std::io::stdout().flush()?; } } let dt = start.elapsed(); println!( "\n{} tokens generated ({:.2} token/s)\n", output_token_ids.len(), output_token_ids.len() as f64 / dt.as_secs_f64(), ); } } None => { let mut model = builder.build_encoder()?; let sentences = [ "The cat sits outside", "A man is playing guitar", "I love pasta", "The new movie is awesome", "The cat plays in the garden", "A woman watches TV", "The new movie is so great", "Do you like pizza?", ]; let n_sentences = sentences.len(); let mut all_embeddings = Vec::with_capacity(n_sentences); for sentence in sentences { let tokens = tokenizer .encode(sentence, true) .map_err(E::msg)? .get_ids() .to_vec(); let token_ids = Tensor::new(&tokens[..], model.device())?.unsqueeze(0)?; let embeddings = model.forward(&token_ids)?; println!("generated embeddings {:?}", embeddings.shape()); // Apply some avg-pooling by taking the mean embedding value for all tokens (including padding) let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?; let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?; let embeddings = if args.normalize_embeddings { normalize_l2(&embeddings)? 
} else { embeddings }; println!("pooled embeddings {:?}", embeddings.shape()); all_embeddings.push(embeddings) } let mut similarities = vec![]; for (i, e_i) in all_embeddings.iter().enumerate() { for (j, e_j) in all_embeddings .iter() .enumerate() .take(n_sentences) .skip(i + 1) { let sum_ij = (e_i * e_j)?.sum_all()?.to_scalar::<f32>()?; let sum_i2 = (e_i * e_i)?.sum_all()?.to_scalar::<f32>()?; let sum_j2 = (e_j * e_j)?.sum_all()?.to_scalar::<f32>()?; let cosine_similarity = sum_ij / (sum_i2 * sum_j2).sqrt(); similarities.push((cosine_similarity, i, j)) } } similarities.sort_by(|u, v| v.0.total_cmp(&u.0)); for &(score, i, j) in similarities[..5].iter() { println!("score: {score:.2} '{}' '{}'", sentences[i], sentences[j]) } } } Ok(()) } pub fn normalize_l2(v: &Tensor) -> Result<Tensor> { Ok(v.broadcast_div(&v.sqr()?.sum_keepdim(1)?.sqrt()?)?) }
candle/candle-examples/examples/t5/main.rs/0
{ "file_path": "candle/candle-examples/examples/t5/main.rs", "repo_id": "candle", "token_count": 6911 }
42
#[cfg(feature = "accelerate")] extern crate accelerate_src; #[cfg(feature = "mkl")] extern crate intel_mkl_src; use anyhow::{Error as E, Result}; use candle::{Device, IndexOp, Tensor}; use candle_nn::{ops::softmax, VarBuilder}; use clap::{Parser, ValueEnum}; use hf_hub::{api::sync::Api, Repo, RepoType}; use rand::{distr::Distribution, SeedableRng}; use tokenizers::Tokenizer; mod multilingual; use candle_transformers::models::whisper::{self as m, audio, Config}; use cpal::traits::{DeviceTrait, HostTrait, StreamTrait}; pub enum Model { Normal(m::model::Whisper), Quantized(m::quantized_model::Whisper), } // Maybe we should use some traits rather than doing the dispatch for all these. impl Model { pub fn config(&self) -> &Config { match self { Self::Normal(m) => &m.config, Self::Quantized(m) => &m.config, } } pub fn encoder_forward(&mut self, x: &Tensor, flush: bool) -> candle::Result<Tensor> { match self { Self::Normal(m) => m.encoder.forward(x, flush), Self::Quantized(m) => m.encoder.forward(x, flush), } } pub fn decoder_forward( &mut self, x: &Tensor, xa: &Tensor, flush: bool, ) -> candle::Result<Tensor> { match self { Self::Normal(m) => m.decoder.forward(x, xa, flush), Self::Quantized(m) => m.decoder.forward(x, xa, flush), } } pub fn decoder_final_linear(&self, x: &Tensor) -> candle::Result<Tensor> { match self { Self::Normal(m) => m.decoder.final_linear(x), Self::Quantized(m) => m.decoder.final_linear(x), } } } #[allow(dead_code)] #[derive(Debug, Clone)] struct DecodingResult { tokens: Vec<u32>, text: String, avg_logprob: f64, no_speech_prob: f64, temperature: f64, compression_ratio: f64, } #[allow(dead_code)] #[derive(Debug, Clone)] struct Segment { start: f64, duration: f64, dr: DecodingResult, } struct Decoder { model: Model, rng: rand::rngs::StdRng, task: Option<Task>, timestamps: bool, verbose: bool, tokenizer: Tokenizer, suppress_tokens: Tensor, sot_token: u32, transcribe_token: u32, translate_token: u32, eot_token: u32, no_speech_token: u32, 
no_timestamps_token: u32, language_token: Option<u32>, } impl Decoder { #[allow(clippy::too_many_arguments)] fn new( model: Model, tokenizer: Tokenizer, seed: u64, device: &Device, language_token: Option<u32>, task: Option<Task>, timestamps: bool, verbose: bool, ) -> Result<Self> { let no_timestamps_token = token_id(&tokenizer, m::NO_TIMESTAMPS_TOKEN)?; // Suppress the notimestamps token when in timestamps mode. // https://github.com/openai/whisper/blob/e8622f9afc4eba139bf796c210f5c01081000472/whisper/decoding.py#L452 let suppress_tokens: Vec<f32> = (0..model.config().vocab_size as u32) .map(|i| { if model.config().suppress_tokens.contains(&i) || timestamps && i == no_timestamps_token { f32::NEG_INFINITY } else { 0f32 } }) .collect(); let suppress_tokens = Tensor::new(suppress_tokens.as_slice(), device)?; let sot_token = token_id(&tokenizer, m::SOT_TOKEN)?; let transcribe_token = token_id(&tokenizer, m::TRANSCRIBE_TOKEN)?; let translate_token = token_id(&tokenizer, m::TRANSLATE_TOKEN)?; let eot_token = token_id(&tokenizer, m::EOT_TOKEN)?; let no_speech_token = m::NO_SPEECH_TOKENS .iter() .find_map(|token| token_id(&tokenizer, token).ok()); let no_speech_token = match no_speech_token { None => anyhow::bail!("unable to find any non-speech token"), Some(n) => n, }; Ok(Self { model, rng: rand::rngs::StdRng::seed_from_u64(seed), tokenizer, task, timestamps, verbose, suppress_tokens, sot_token, transcribe_token, translate_token, eot_token, no_speech_token, language_token, no_timestamps_token, }) } fn decode(&mut self, mel: &Tensor, t: f64) -> Result<DecodingResult> { let model = &mut self.model; let audio_features = model.encoder_forward(mel, true)?; if self.verbose { println!("audio features: {:?}", audio_features.dims()); } let sample_len = model.config().max_target_positions / 2; let mut sum_logprob = 0f64; let mut no_speech_prob = f64::NAN; let mut tokens = vec![self.sot_token]; if let Some(language_token) = self.language_token { tokens.push(language_token); } match 
self.task { None | Some(Task::Transcribe) => tokens.push(self.transcribe_token), Some(Task::Translate) => tokens.push(self.translate_token), } if !self.timestamps { tokens.push(self.no_timestamps_token); } for i in 0..sample_len { let tokens_t = Tensor::new(tokens.as_slice(), mel.device())?; // The model expects a batch dim but this inference loop does not handle // it so we add it at this point. let tokens_t = tokens_t.unsqueeze(0)?; let ys = model.decoder_forward(&tokens_t, &audio_features, i == 0)?; // Extract the no speech probability on the first iteration by looking at the first // token logits and the probability for the according token. if i == 0 { let logits = model.decoder_final_linear(&ys.i(..1)?)?.i(0)?.i(0)?; no_speech_prob = softmax(&logits, 0)? .i(self.no_speech_token as usize)? .to_scalar::<f32>()? as f64; } let (_, seq_len, _) = ys.dims3()?; let logits = model .decoder_final_linear(&ys.i((..1, seq_len - 1..))?)? .i(0)? .i(0)?; // TODO: Besides suppress tokens, we should apply the heuristics from // ApplyTimestampRules, i.e.: // - Timestamps come in pairs, except before EOT. // - Timestamps should be non-decreasing. // - If the sum of the probabilities of timestamps is higher than any other tokens, // only consider timestamps when sampling. // https://github.com/openai/whisper/blob/e8622f9afc4eba139bf796c210f5c01081000472/whisper/decoding.py#L439 let logits = logits.broadcast_add(&self.suppress_tokens)?; let next_token = if t > 0f64 { let prs = softmax(&(&logits / t)?, 0)?; let logits_v: Vec<f32> = prs.to_vec1()?; let distr = rand::distr::weighted::WeightedIndex::new(&logits_v)?; distr.sample(&mut self.rng) as u32 } else { let logits_v: Vec<f32> = logits.to_vec1()?; logits_v .iter() .enumerate() .max_by(|(_, u), (_, v)| u.total_cmp(v)) .map(|(i, _)| i as u32) .unwrap() }; tokens.push(next_token); let prob = softmax(&logits, candle::D::Minus1)? .i(next_token as usize)? .to_scalar::<f32>()? 
as f64; if next_token == self.eot_token || tokens.len() > model.config().max_target_positions { break; } sum_logprob += prob.ln(); } let text = self.tokenizer.decode(&tokens, true).map_err(E::msg)?; let avg_logprob = sum_logprob / tokens.len() as f64; Ok(DecodingResult { tokens, text, avg_logprob, no_speech_prob, temperature: t, compression_ratio: f64::NAN, }) } fn decode_with_fallback(&mut self, segment: &Tensor) -> Result<DecodingResult> { for (i, &t) in m::TEMPERATURES.iter().enumerate() { let dr: Result<DecodingResult> = self.decode(segment, t); if i == m::TEMPERATURES.len() - 1 { return dr; } // On errors, we try again with a different temperature. match dr { Ok(dr) => { let needs_fallback = dr.compression_ratio > m::COMPRESSION_RATIO_THRESHOLD || dr.avg_logprob < m::LOGPROB_THRESHOLD; if !needs_fallback || dr.no_speech_prob > m::NO_SPEECH_THRESHOLD { return Ok(dr); } } Err(err) => { println!("Error running at {t}: {err}") } } } unreachable!() } fn run(&mut self, mel: &Tensor, times: Option<(f64, f64)>) -> Result<Vec<Segment>> { let (_, _, content_frames) = mel.dims3()?; let mut seek = 0; let mut segments = vec![]; while seek < content_frames { let start = std::time::Instant::now(); let time_offset = (seek * m::HOP_LENGTH) as f64 / m::SAMPLE_RATE as f64; let segment_size = usize::min(content_frames - seek, m::N_FRAMES); let mel_segment = mel.narrow(2, seek, segment_size)?; let segment_duration = (segment_size * m::HOP_LENGTH) as f64 / m::SAMPLE_RATE as f64; let dr = self.decode_with_fallback(&mel_segment)?; seek += segment_size; if dr.no_speech_prob > m::NO_SPEECH_THRESHOLD && dr.avg_logprob < m::LOGPROB_THRESHOLD { println!("no speech detected, skipping {seek} {dr:?}"); continue; } let segment = Segment { start: time_offset, duration: segment_duration, dr, }; if self.timestamps { println!( "{:.1}s -- {:.1}s", segment.start, segment.start + segment.duration, ); let mut tokens_to_decode = vec![]; let mut prev_timestamp_s = 0f32; for &token in 
segment.dr.tokens.iter() { if token == self.sot_token || token == self.eot_token { continue; } // The no_timestamp_token is the last before the timestamp ones. if token > self.no_timestamps_token { let timestamp_s = (token - self.no_timestamps_token + 1) as f32 / 50.; if !tokens_to_decode.is_empty() { let text = self .tokenizer .decode(&tokens_to_decode, true) .map_err(E::msg)?; println!(" {:.1}s-{:.1}s: {}", prev_timestamp_s, timestamp_s, text); tokens_to_decode.clear() } prev_timestamp_s = timestamp_s; } else { tokens_to_decode.push(token) } } if !tokens_to_decode.is_empty() { let text = self .tokenizer .decode(&tokens_to_decode, true) .map_err(E::msg)?; if !text.is_empty() { println!(" {:.1}s-...: {}", prev_timestamp_s, text); } tokens_to_decode.clear() } } else { match times { Some((start, end)) => { println!("{:.1}s -- {:.1}s: {}", start, end, segment.dr.text) } None => { println!( "{:.1}s -- {:.1}s: {}", segment.start, segment.start + segment.duration, segment.dr.text, ) } } } if self.verbose { println!("{seek}: {segment:?}, in {:?}", start.elapsed()); } segments.push(segment) } Ok(segments) } fn set_language_token(&mut self, language_token: Option<u32>) { self.language_token = language_token; } #[allow(dead_code)] fn reset_kv_cache(&mut self) { match &mut self.model { Model::Normal(m) => m.reset_kv_cache(), Model::Quantized(m) => m.reset_kv_cache(), } } fn model(&mut self) -> &mut Model { &mut self.model } } pub fn token_id(tokenizer: &Tokenizer, token: &str) -> candle::Result<u32> { match tokenizer.token_to_id(token) { None => candle::bail!("no token-id for {token}"), Some(id) => Ok(id), } } #[derive(Clone, Copy, Debug, ValueEnum)] enum Task { Transcribe, Translate, } #[derive(Clone, Copy, Debug, PartialEq, Eq, ValueEnum)] enum WhichModel { Tiny, #[value(name = "tiny.en")] TinyEn, Base, #[value(name = "base.en")] BaseEn, Small, #[value(name = "small.en")] SmallEn, Medium, #[value(name = "medium.en")] MediumEn, Large, LargeV2, LargeV3, LargeV3Turbo, 
#[value(name = "distil-medium.en")] DistilMediumEn, #[value(name = "distil-large-v2")] DistilLargeV2, } impl WhichModel { fn is_multilingual(&self) -> bool { match self { Self::Tiny | Self::Base | Self::Small | Self::Medium | Self::Large | Self::LargeV2 | Self::LargeV3 | Self::LargeV3Turbo | Self::DistilLargeV2 => true, Self::TinyEn | Self::BaseEn | Self::SmallEn | Self::MediumEn | Self::DistilMediumEn => { false } } } fn model_and_revision(&self) -> (&'static str, &'static str) { match self { Self::Tiny => ("openai/whisper-tiny", "main"), Self::TinyEn => ("openai/whisper-tiny.en", "refs/pr/15"), Self::Base => ("openai/whisper-base", "refs/pr/22"), Self::BaseEn => ("openai/whisper-base.en", "refs/pr/13"), Self::Small => ("openai/whisper-small", "main"), Self::SmallEn => ("openai/whisper-small.en", "refs/pr/10"), Self::Medium => ("openai/whisper-medium", "main"), Self::MediumEn => ("openai/whisper-medium.en", "main"), Self::Large => ("openai/whisper-large", "refs/pr/36"), Self::LargeV2 => ("openai/whisper-large-v2", "refs/pr/57"), Self::LargeV3 => ("openai/whisper-large-v3", "main"), Self::LargeV3Turbo => ("openai/whisper-large-v3-turbo", "main"), Self::DistilMediumEn => ("distil-whisper/distil-medium.en", "main"), Self::DistilLargeV2 => ("distil-whisper/distil-large-v2", "main"), } } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, #[arg(long)] model_id: Option<String>, /// The model to use, check out available models: /// https://huggingface.co/models?search=whisper #[arg(long)] revision: Option<String>, /// The model to be used, can be tiny, small, medium. #[arg(long, default_value = "tiny.en")] model: WhichModel, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, #[arg(long)] quantized: bool, /// Language. 
#[arg(long)] language: Option<String>, /// Task, when no task is specified, the input tokens contain only the sot token which can /// improve things when in no-timestamp mode. #[arg(long)] task: Option<Task>, /// Timestamps mode, this is not fully implemented yet. #[arg(long)] timestamps: bool, /// Print the full DecodingResult structure rather than just the text. #[arg(long)] verbose: bool, /// The input device to use. #[arg(long)] device: Option<String>, } pub fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; let device = candle_examples::device(args.cpu)?; let (default_model, default_revision) = if args.quantized { ("lmz/candle-whisper", "main") } else { args.model.model_and_revision() }; let default_model = default_model.to_string(); let default_revision = default_revision.to_string(); let (model_id, revision) = match (args.model_id, args.revision) { (Some(model_id), Some(revision)) => (model_id, revision), (Some(model_id), None) => (model_id, "main".to_string()), (None, Some(revision)) => (default_model, revision), (None, None) => (default_model, default_revision), }; let (config_filename, tokenizer_filename, weights_filename) = { let api = Api::new()?; let repo = api.repo(Repo::with_revision(model_id, RepoType::Model, revision)); let (config, tokenizer, model) = if args.quantized { let ext = match args.model { WhichModel::TinyEn => "tiny-en", WhichModel::Tiny => "tiny", _ => unimplemented!("no quantized support for {:?}", args.model), }; ( repo.get(&format!("config-{ext}.json"))?, repo.get(&format!("tokenizer-{ext}.json"))?, repo.get(&format!("model-{ext}-q80.gguf"))?, ) } else { let config = repo.get("config.json")?; let tokenizer = repo.get("tokenizer.json")?; let model = repo.get("model.safetensors")?; 
(config, tokenizer, model) }; (config, tokenizer, model) }; let config: Config = serde_json::from_str(&std::fs::read_to_string(config_filename)?)?; let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let model = if args.quantized { let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf( &weights_filename, &device, )?; Model::Quantized(m::quantized_model::Whisper::load(&vb, config.clone())?) } else { let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[weights_filename], m::DTYPE, &device)? }; Model::Normal(m::model::Whisper::load(&vb, config.clone())?) }; let mut decoder = Decoder::new( model, tokenizer.clone(), args.seed, &device, /* language_token */ None, args.task, args.timestamps, args.verbose, )?; let mel_bytes = match config.num_mel_bins { 80 => include_bytes!("../whisper/melfilters.bytes").as_slice(), 128 => include_bytes!("../whisper/melfilters128.bytes").as_slice(), nmel => anyhow::bail!("unexpected num_mel_bins {nmel}"), }; let mut mel_filters = vec![0f32; mel_bytes.len() / 4]; <byteorder::LittleEndian as byteorder::ByteOrder>::read_f32_into(mel_bytes, &mut mel_filters); // Set up the input device and stream with the default input config. let host = cpal::default_host(); let audio_device = match args.device.as_ref() { None => host.default_input_device(), Some(device) => host .input_devices()? .find(|x| x.name().map_or(false, |y| &y == device)), } .expect("failed to find the audio input device"); let audio_config = audio_device .default_input_config() .expect("Failed to get default input config"); println!("audio config {audio_config:?}"); let channel_count = audio_config.channels() as usize; let in_sample_rate = audio_config.sample_rate().0 as usize; let resample_ratio = 16000. 
/ in_sample_rate as f64; let mut resampler = rubato::FastFixedIn::new( resample_ratio, 10., rubato::PolynomialDegree::Septic, 1024, 1, )?; let (tx, rx) = std::sync::mpsc::channel(); let stream = audio_device.build_input_stream( &audio_config.config(), move |pcm: &[f32], _: &cpal::InputCallbackInfo| { let pcm = pcm .iter() .step_by(channel_count) .copied() .collect::<Vec<f32>>(); if !pcm.is_empty() { tx.send(pcm).unwrap() } }, move |err| { eprintln!("an error occurred on stream: {}", err); }, None, )?; stream.play()?; // loop to process the audio data forever (until the user stops the program) println!("transcribing audio..."); let mut buffered_pcm = vec![]; let mut language_token_set = false; while let Ok(pcm) = rx.recv() { use rubato::Resampler; buffered_pcm.extend_from_slice(&pcm); if buffered_pcm.len() < 10 * in_sample_rate { continue; } let mut resampled_pcm = vec![]; // resample the audio, one chunk of 1024 samples at a time. // in case the audio input failed to produce an exact multiple of 1024 samples, // process the remainder on the next iteration of the loop. let full_chunks = buffered_pcm.len() / 1024; let remainder = buffered_pcm.len() % 1024; for chunk in 0..full_chunks { let buffered_pcm = &buffered_pcm[chunk * 1024..(chunk + 1) * 1024]; let pcm = resampler.process(&[&buffered_pcm], None)?; resampled_pcm.extend_from_slice(&pcm[0]); } let pcm = resampled_pcm; println!("{} {}", buffered_pcm.len(), pcm.len()); if remainder == 0 { buffered_pcm.clear(); } else { // efficiently copy the remainder to the beginning of the `buffered_pcm` buffer and // truncate it. 
That's more efficient then allocating a new vector and copying into it println!("audio device produced partial chunk with {remainder} samples; processing the remainder on the next iteration of the loop"); buffered_pcm.copy_within(full_chunks * 1024.., 0); buffered_pcm.truncate(remainder); } let mel = audio::pcm_to_mel(&config, &pcm, &mel_filters); let mel_len = mel.len(); let mel = Tensor::from_vec( mel, (1, config.num_mel_bins, mel_len / config.num_mel_bins), &device, )?; // on the first iteration, we detect the language and set the language token. if !language_token_set { let language_token = match (args.model.is_multilingual(), args.language.clone()) { (true, None) => Some(multilingual::detect_language( decoder.model(), &tokenizer, &mel, )?), (false, None) => None, (true, Some(language)) => match token_id(&tokenizer, &format!("<|{language}|>")) { Ok(token_id) => Some(token_id), Err(_) => anyhow::bail!("language {language} is not supported"), }, (false, Some(_)) => { anyhow::bail!("a language cannot be set for non-multilingual models") } }; println!("language_token: {:?}", language_token); decoder.set_language_token(language_token); language_token_set = true; } decoder.run(&mel, None)?; decoder.reset_kv_cache(); } Ok(()) }
candle/candle-examples/examples/whisper-microphone/main.rs/0
{ "file_path": "candle/candle-examples/examples/whisper-microphone/main.rs", "repo_id": "candle", "token_count": 12460 }
43
use candle::{DType, Device, IndexOp, Result, Tensor}; use candle_nn::{batch_norm, conv2d, conv2d_no_bias, Func, Module, VarBuilder}; use std::collections::BTreeMap; use std::fs::File; use std::io::{BufRead, BufReader}; use std::path::Path; #[derive(Debug)] struct Block { block_type: String, parameters: BTreeMap<String, String>, } impl Block { fn get(&self, key: &str) -> Result<&str> { match self.parameters.get(key) { None => candle::bail!("cannot find {} in {}", key, self.block_type), Some(value) => Ok(value), } } } #[derive(Debug)] pub struct Darknet { blocks: Vec<Block>, parameters: BTreeMap<String, String>, } impl Darknet { fn get(&self, key: &str) -> Result<&str> { match self.parameters.get(key) { None => candle::bail!("cannot find {} in net parameters", key), Some(value) => Ok(value), } } } struct Accumulator { block_type: Option<String>, parameters: BTreeMap<String, String>, net: Darknet, } impl Accumulator { fn new() -> Accumulator { Accumulator { block_type: None, parameters: BTreeMap::new(), net: Darknet { blocks: vec![], parameters: BTreeMap::new(), }, } } fn finish_block(&mut self) { match &self.block_type { None => (), Some(block_type) => { if block_type == "net" { self.net.parameters = self.parameters.clone(); } else { let block = Block { block_type: block_type.to_string(), parameters: self.parameters.clone(), }; self.net.blocks.push(block); } self.parameters.clear(); } } self.block_type = None; } } pub fn parse_config<T: AsRef<Path>>(path: T) -> Result<Darknet> { let file = File::open(path.as_ref())?; let mut acc = Accumulator::new(); for line in BufReader::new(file).lines() { let line = line?; if line.is_empty() || line.starts_with('#') { continue; } let line = line.trim(); if line.starts_with('[') { if !line.ends_with(']') { candle::bail!("line does not end with ']' {line}") } let line = &line[1..line.len() - 1]; acc.finish_block(); acc.block_type = Some(line.to_string()); } else { let key_value: Vec<&str> = line.splitn(2, '=').collect(); if 
key_value.len() != 2 { candle::bail!("missing equal {line}") } let prev = acc.parameters.insert( key_value[0].trim().to_owned(), key_value[1].trim().to_owned(), ); if prev.is_some() { candle::bail!("multiple value for key {}", line) } } } acc.finish_block(); Ok(acc.net) } enum Bl { Layer(Box<dyn candle_nn::Module + Send + Sync>), Route(Vec<usize>), Shortcut(usize), Yolo(usize, Vec<(usize, usize)>), } fn conv(vb: VarBuilder, index: usize, p: usize, b: &Block) -> Result<(usize, Bl)> { let activation = b.get("activation")?; let filters = b.get("filters")?.parse::<usize>()?; let pad = b.get("pad")?.parse::<usize>()?; let size = b.get("size")?.parse::<usize>()?; let stride = b.get("stride")?.parse::<usize>()?; let padding = if pad != 0 { (size - 1) / 2 } else { 0 }; let (bn, bias) = match b.parameters.get("batch_normalize") { Some(p) if p.parse::<usize>()? != 0 => { let bn = batch_norm(filters, 1e-5, vb.pp(format!("batch_norm_{index}")))?; (Some(bn), false) } Some(_) | None => (None, true), }; let conv_cfg = candle_nn::Conv2dConfig { stride, padding, groups: 1, dilation: 1, cudnn_fwd_algo: None, }; let conv = if bias { conv2d(p, filters, size, conv_cfg, vb.pp(format!("conv_{index}")))? } else { conv2d_no_bias(p, filters, size, conv_cfg, vb.pp(format!("conv_{index}")))? }; let leaky = match activation { "leaky" => true, "linear" => false, otherwise => candle::bail!("unsupported activation {}", otherwise), }; let func = candle_nn::func(move |xs| { let xs = conv.forward(xs)?; let xs = match &bn { Some(bn) => xs.apply_t(bn, false)?, None => xs, }; let xs = if leaky { xs.maximum(&(&xs * 0.1)?)? 
} else { xs }; Ok(xs) }); Ok((filters, Bl::Layer(Box::new(func)))) } fn upsample(prev_channels: usize) -> Result<(usize, Bl)> { let layer = candle_nn::func(|xs| { let (_n, _c, h, w) = xs.dims4()?; xs.upsample_nearest2d(2 * h, 2 * w) }); Ok((prev_channels, Bl::Layer(Box::new(layer)))) } fn int_list_of_string(s: &str) -> Result<Vec<i64>> { let res: std::result::Result<Vec<_>, _> = s.split(',').map(|xs| xs.trim().parse::<i64>()).collect(); Ok(res?) } fn usize_of_index(index: usize, i: i64) -> usize { if i >= 0 { i as usize } else { (index as i64 + i) as usize } } fn route(index: usize, p: &[(usize, Bl)], block: &Block) -> Result<(usize, Bl)> { let layers = int_list_of_string(block.get("layers")?)?; let layers: Vec<usize> = layers .into_iter() .map(|l| usize_of_index(index, l)) .collect(); let channels = layers.iter().map(|&l| p[l].0).sum(); Ok((channels, Bl::Route(layers))) } fn shortcut(index: usize, p: usize, block: &Block) -> Result<(usize, Bl)> { let from = block.get("from")?.parse::<i64>()?; Ok((p, Bl::Shortcut(usize_of_index(index, from)))) } fn yolo(p: usize, block: &Block) -> Result<(usize, Bl)> { let classes = block.get("classes")?.parse::<usize>()?; let flat = int_list_of_string(block.get("anchors")?)?; if flat.len() % 2 != 0 { candle::bail!("even number of anchors"); } let flat = flat.into_iter().map(|i| i as usize).collect::<Vec<_>>(); let anchors: Vec<_> = (0..(flat.len() / 2)) .map(|i| (flat[2 * i], flat[2 * i + 1])) .collect(); let mask = int_list_of_string(block.get("mask")?)?; let anchors = mask.into_iter().map(|i| anchors[i as usize]).collect(); Ok((p, Bl::Yolo(classes, anchors))) } fn detect( xs: &Tensor, image_height: usize, classes: usize, anchors: &[(usize, usize)], ) -> Result<Tensor> { let (bsize, _channels, height, _width) = xs.dims4()?; let stride = image_height / height; let grid_size = image_height / stride; let bbox_attrs = 5 + classes; let nanchors = anchors.len(); let xs = xs .reshape((bsize, bbox_attrs * nanchors, grid_size * 
grid_size))? .transpose(1, 2)? .contiguous()? .reshape((bsize, grid_size * grid_size * nanchors, bbox_attrs))?; let grid = Tensor::arange(0u32, grid_size as u32, &Device::Cpu)?; let a = grid.repeat((grid_size, 1))?; let b = a.t()?.contiguous()?; let x_offset = a.flatten_all()?.unsqueeze(1)?; let y_offset = b.flatten_all()?.unsqueeze(1)?; let xy_offset = Tensor::cat(&[&x_offset, &y_offset], 1)? .repeat((1, nanchors))? .reshape((grid_size * grid_size * nanchors, 2))? .unsqueeze(0)? .to_dtype(DType::F32)?; let anchors: Vec<f32> = anchors .iter() .flat_map(|&(x, y)| vec![x as f32 / stride as f32, y as f32 / stride as f32].into_iter()) .collect(); let anchors = Tensor::new(anchors.as_slice(), &Device::Cpu)? .reshape((anchors.len() / 2, 2))? .repeat((grid_size * grid_size, 1))? .unsqueeze(0)?; let ys02 = xs.i((.., .., 0..2))?; let ys24 = xs.i((.., .., 2..4))?; let ys4 = xs.i((.., .., 4..))?; let ys02 = (candle_nn::ops::sigmoid(&ys02)?.add(&xy_offset)? * stride as f64)?; let ys24 = (ys24.exp()?.mul(&anchors)? 
* stride as f64)?; let ys4 = candle_nn::ops::sigmoid(&ys4)?; let ys = Tensor::cat(&[ys02, ys24, ys4], 2)?; Ok(ys) } impl Darknet { pub fn height(&self) -> Result<usize> { let image_height = self.get("height")?.parse::<usize>()?; Ok(image_height) } pub fn width(&self) -> Result<usize> { let image_width = self.get("width")?.parse::<usize>()?; Ok(image_width) } pub fn build_model(&self, vb: VarBuilder) -> Result<Func<'_>> { let mut blocks: Vec<(usize, Bl)> = vec![]; let mut prev_channels: usize = 3; for (index, block) in self.blocks.iter().enumerate() { let channels_and_bl = match block.block_type.as_str() { "convolutional" => conv(vb.pp(index.to_string()), index, prev_channels, block)?, "upsample" => upsample(prev_channels)?, "shortcut" => shortcut(index, prev_channels, block)?, "route" => route(index, &blocks, block)?, "yolo" => yolo(prev_channels, block)?, otherwise => candle::bail!("unsupported block type {}", otherwise), }; prev_channels = channels_and_bl.0; blocks.push(channels_and_bl); } let image_height = self.height()?; let func = candle_nn::func(move |xs| { let mut prev_ys: Vec<Tensor> = vec![]; let mut detections: Vec<Tensor> = vec![]; for (_, b) in blocks.iter() { let ys = match b { Bl::Layer(l) => { let xs = prev_ys.last().unwrap_or(xs); l.forward(xs)? } Bl::Route(layers) => { let layers: Vec<_> = layers.iter().map(|&i| &prev_ys[i]).collect(); Tensor::cat(&layers, 1)? } Bl::Shortcut(from) => (prev_ys.last().unwrap() + prev_ys.get(*from).unwrap())?, Bl::Yolo(classes, anchors) => { let xs = prev_ys.last().unwrap_or(xs); detections.push(detect(xs, image_height, *classes, anchors)?); Tensor::new(&[0u32], &Device::Cpu)? } }; prev_ys.push(ys); } Tensor::cat(&detections, 1) }); Ok(func) } }
candle/candle-examples/examples/yolo-v3/darknet.rs/0
{ "file_path": "candle/candle-examples/examples/yolo-v3/darknet.rs", "repo_id": "candle", "token_count": 5418 }
44
pub mod audio; pub mod bs1770; pub mod coco_classes; pub mod imagenet; pub mod token_output_stream; pub mod wav; use candle::utils::{cuda_is_available, metal_is_available}; use candle::{Device, Result, Tensor}; pub fn device(cpu: bool) -> Result<Device> { if cpu { Ok(Device::Cpu) } else if cuda_is_available() { Ok(Device::new_cuda(0)?) } else if metal_is_available() { Ok(Device::new_metal(0)?) } else { #[cfg(all(target_os = "macos", target_arch = "aarch64"))] { println!( "Running on CPU, to run on GPU(metal), build this example with `--features metal`" ); } #[cfg(not(all(target_os = "macos", target_arch = "aarch64")))] { println!("Running on CPU, to run on GPU, build this example with `--features cuda`"); } Ok(Device::Cpu) } } pub fn load_image<P: AsRef<std::path::Path>>( p: P, resize_longest: Option<usize>, ) -> Result<(Tensor, usize, usize)> { let img = image::ImageReader::open(p)? .decode() .map_err(candle::Error::wrap)?; let (initial_h, initial_w) = (img.height() as usize, img.width() as usize); let img = match resize_longest { None => img, Some(resize_longest) => { let (height, width) = (img.height(), img.width()); let resize_longest = resize_longest as u32; let (height, width) = if height < width { let h = (resize_longest * height) / width; (h, resize_longest) } else { let w = (resize_longest * width) / height; (resize_longest, w) }; img.resize_exact(width, height, image::imageops::FilterType::CatmullRom) } }; let (height, width) = (img.height() as usize, img.width() as usize); let img = img.to_rgb8(); let data = img.into_raw(); let data = Tensor::from_vec(data, (height, width, 3), &Device::Cpu)?.permute((2, 0, 1))?; Ok((data, initial_h, initial_w)) } pub fn load_image_and_resize<P: AsRef<std::path::Path>>( p: P, width: usize, height: usize, ) -> Result<Tensor> { let img = image::ImageReader::open(p)? .decode() .map_err(candle::Error::wrap)? 
.resize_to_fill( width as u32, height as u32, image::imageops::FilterType::Triangle, ); let img = img.to_rgb8(); let data = img.into_raw(); Tensor::from_vec(data, (width, height, 3), &Device::Cpu)?.permute((2, 0, 1)) } /// Saves an image to disk using the image crate, this expects an input with shape /// (c, height, width). pub fn save_image<P: AsRef<std::path::Path>>(img: &Tensor, p: P) -> Result<()> { let p = p.as_ref(); let (channel, height, width) = img.dims3()?; if channel != 3 { candle::bail!("save_image expects an input of shape (3, height, width)") } let img = img.permute((1, 2, 0))?.flatten_all()?; let pixels = img.to_vec1::<u8>()?; let image: image::ImageBuffer<image::Rgb<u8>, Vec<u8>> = match image::ImageBuffer::from_raw(width as u32, height as u32, pixels) { Some(image) => image, None => candle::bail!("error saving image {p:?}"), }; image.save(p).map_err(candle::Error::wrap)?; Ok(()) } pub fn save_image_resize<P: AsRef<std::path::Path>>( img: &Tensor, p: P, h: usize, w: usize, ) -> Result<()> { let p = p.as_ref(); let (channel, height, width) = img.dims3()?; if channel != 3 { candle::bail!("save_image expects an input of shape (3, height, width)") } let img = img.permute((1, 2, 0))?.flatten_all()?; let pixels = img.to_vec1::<u8>()?; let image: image::ImageBuffer<image::Rgb<u8>, Vec<u8>> = match image::ImageBuffer::from_raw(width as u32, height as u32, pixels) { Some(image) => image, None => candle::bail!("error saving image {p:?}"), }; let image = image::DynamicImage::from(image); let image = image.resize_to_fill(w as u32, h as u32, image::imageops::FilterType::CatmullRom); image.save(p).map_err(candle::Error::wrap)?; Ok(()) } /// Loads the safetensors files for a model from the hub based on a json index file. 
pub fn hub_load_safetensors( repo: &hf_hub::api::sync::ApiRepo, json_file: &str, ) -> Result<Vec<std::path::PathBuf>> { let json_file = repo.get(json_file).map_err(candle::Error::wrap)?; let json_file = std::fs::File::open(json_file)?; let json: serde_json::Value = serde_json::from_reader(&json_file).map_err(candle::Error::wrap)?; let weight_map = match json.get("weight_map") { None => candle::bail!("no weight map in {json_file:?}"), Some(serde_json::Value::Object(map)) => map, Some(_) => candle::bail!("weight map in {json_file:?} is not a map"), }; let mut safetensors_files = std::collections::HashSet::new(); for value in weight_map.values() { if let Some(file) = value.as_str() { safetensors_files.insert(file.to_string()); } } let safetensors_files = safetensors_files .iter() .map(|v| repo.get(v).map_err(candle::Error::wrap)) .collect::<Result<Vec<_>>>()?; Ok(safetensors_files) } pub fn hub_load_local_safetensors<P: AsRef<std::path::Path>>( path: P, json_file: &str, ) -> Result<Vec<std::path::PathBuf>> { let path = path.as_ref(); let jsfile = std::fs::File::open(path.join(json_file))?; let json: serde_json::Value = serde_json::from_reader(&jsfile).map_err(candle::Error::wrap)?; let weight_map = match json.get("weight_map") { None => candle::bail!("no weight map in {json_file:?}"), Some(serde_json::Value::Object(map)) => map, Some(_) => candle::bail!("weight map in {json_file:?} is not a map"), }; let mut safetensors_files = std::collections::HashSet::new(); for value in weight_map.values() { if let Some(file) = value.as_str() { safetensors_files.insert(file); } } let safetensors_files: Vec<_> = safetensors_files .into_iter() .map(|v| path.join(v)) .collect(); Ok(safetensors_files) }
candle/candle-examples/src/lib.rs/0
{ "file_path": "candle/candle-examples/src/lib.rs", "repo_id": "candle", "token_count": 2878 }
45
/****************************************************************************** * Copyright (c) 2024, Tri Dao. ******************************************************************************/ #pragma once #include "cute/tensor.hpp" #include "cutlass/cutlass.h" #include "cutlass/layout/layout.h" #include <cutlass/numeric_types.h> using namespace cute; template<int kHeadDim_, int kBlockM_, int kBlockN_, int kNWarps_, typename elem_type=cutlass::half_t> struct Flash_kernel_traits { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 using Element = elem_type; static constexpr bool Has_cp_async = true; #else using Element = cutlass::half_t; static constexpr bool Has_cp_async = false; #endif using ElementAccum = float; using index_t = int64_t; #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 using MMA_Atom_Arch = std::conditional_t< std::is_same_v<elem_type, cutlass::half_t>, MMA_Atom<SM80_16x8x16_F32F16F16F32_TN>, MMA_Atom<SM80_16x8x16_F32BF16BF16F32_TN> >; #else using MMA_Atom_Arch = MMA_Atom<SM75_16x8x8_F32F16F16F32_TN>; #endif #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 750 using SmemCopyAtom = Copy_Atom<SM75_U32x4_LDSM_N, elem_type>; using SmemCopyAtomTransposed = Copy_Atom<SM75_U16x8_LDSM_T, elem_type>; #else using SmemCopyAtom = Copy_Atom<DefaultCopy, elem_type>; using SmemCopyAtomTransposed = Copy_Atom<DefaultCopy, elem_type>; #endif }; // If Share_Q_K_smem is true, that forces Is_Q_in_regs to be true template<int kHeadDim_, int kBlockM_, int kBlockN_, int kNWarps_, bool Is_Q_in_regs_=false, bool Share_Q_K_smem_=false, typename elem_type=cutlass::half_t, typename Base=Flash_kernel_traits<kHeadDim_, kBlockM_, kBlockN_, kNWarps_, elem_type> > struct Flash_fwd_kernel_traits : public Base { using Element = typename Base::Element; using ElementAccum = typename Base::ElementAccum; using index_t = typename Base::index_t; static constexpr bool Has_cp_async = Base::Has_cp_async; using SmemCopyAtom = typename Base::SmemCopyAtom; using SmemCopyAtomTransposed = typename 
Base::SmemCopyAtomTransposed; static constexpr bool Share_Q_K_smem = Share_Q_K_smem_; static constexpr bool Is_Q_in_regs = Is_Q_in_regs_ || Share_Q_K_smem; // The number of threads. static constexpr int kNWarps = kNWarps_; static constexpr int kNThreads = kNWarps * 32; static constexpr int kBlockM = kBlockM_; static constexpr int kBlockN = kBlockN_; static constexpr int kHeadDim = kHeadDim_; static_assert(kHeadDim % 32 == 0); static constexpr int kBlockKSmem = kHeadDim % 64 == 0 ? 64 : 32; static constexpr int kBlockKGmem = kHeadDim % 128 == 0 ? 128 : (kHeadDim % 64 == 0 ? 64 : 32); static constexpr int kSwizzle = kBlockKSmem == 32 ? 2 : 3; using TiledMma = TiledMMA< typename Base::MMA_Atom_Arch, Layout<Shape<Int<kNWarps>,_1,_1>>, // 4x1x1 or 8x1x1 thread group Tile<Int<16 * kNWarps>, _16, _16>>; using SmemLayoutAtomQ = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, // This has to be kBlockKSmem, using kHeadDim gives wrong results for d=128 Layout<Shape<_8, Int<kBlockKSmem>>, Stride<Int<kBlockKSmem>, _1>>{})); using SmemLayoutQ = decltype(tile_to_shape( SmemLayoutAtomQ{}, Shape<Int<kBlockM>, Int<kHeadDim>>{})); using SmemLayoutKV = decltype(tile_to_shape( SmemLayoutAtomQ{}, Shape<Int<kBlockN>, Int<kHeadDim>>{})); // https://github.com/ColfaxResearch/cutlass-kernels/blob/a222587e6d59b93ba704853d3946fb686d8b8892/src/fmha/fmha_forward.cu#L434 using SmemLayoutVtransposed = decltype( composition(SmemLayoutKV{}, make_layout(Shape<Int<kHeadDim>, Int<kBlockN>>{}, GenRowMajor{}))); using SmemLayoutVtransposedNoSwizzle = decltype(get_nonswizzle_portion(SmemLayoutVtransposed{})); using SmemLayoutAtomO = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, Layout<Shape<Int<8>, Int<kBlockKSmem>>, Stride<Int<kBlockKSmem>, _1>>{})); using SmemLayoutO = decltype(tile_to_shape( SmemLayoutAtomO{}, Shape<Int<kBlockM>, Int<kHeadDim>>{})); using SmemCopyAtomO = Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, Element>; using SmemCopyAtomOaccum = 
Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, ElementAccum>; static constexpr int kSmemQSize = size(SmemLayoutQ{}) * sizeof(Element); static constexpr int kSmemKVSize = size(SmemLayoutKV{}) * 2 * sizeof(Element); static constexpr int kSmemSize = Share_Q_K_smem ? std::max(kSmemQSize, kSmemKVSize) : kSmemQSize + kSmemKVSize; static constexpr int kGmemElemsPerLoad = sizeof(cute::uint128_t) / sizeof(Element); static_assert(kHeadDim % kGmemElemsPerLoad == 0, "kHeadDim must be a multiple of kGmemElemsPerLoad"); // Using kBlockKSmem here is 6-10% faster than kBlockKGmem for d=128 because of bank conflicts. // For example, for d=128, smem is split into 2 "pages", each page takes care of columns // 0-63 and 64-127. If we have 16 threads per row for gmem read, when we write to smem, // thread 0 - 7 will write to the first page and thread 8 - 15 will write to the second page, // to the same banks. static constexpr int kGmemThreadsPerRow = kBlockKSmem / kGmemElemsPerLoad; static_assert(kNThreads % kGmemThreadsPerRow == 0, "kNThreads must be a multiple of kGmemThreadsPerRow"); using GmemLayoutAtom = Layout<Shape <Int<kNThreads / kGmemThreadsPerRow>, Int<kGmemThreadsPerRow>>, Stride<Int<kGmemThreadsPerRow>, _1>>; // We use CACHEGLOBAL instead of CACHEALWAYS for both Q and K/V, since we won't be reading // from the same address by the same threadblock. This is slightly faster. 
using Gmem_copy_struct = std::conditional_t< Has_cp_async, SM80_CP_ASYNC_CACHEGLOBAL<cute::uint128_t>, AutoVectorizingCopyWithAssumedAlignment<128> >; using GmemTiledCopyQKV = decltype( make_tiled_copy(Copy_Atom<Gmem_copy_struct, Element>{}, GmemLayoutAtom{}, Layout<Shape<_1, _8>>{})); // Val layout, 8 vals per read using GmemTiledCopyO = decltype( make_tiled_copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, Element>{}, GmemLayoutAtom{}, Layout<Shape<_1, _8>>{})); // Val layout, 8 vals per store using GmemLayoutAtomOaccum = std::conditional_t< kBlockKSmem == 32, Layout<Shape <_16, _8>, // Thread layout, 8 threads per row Stride< _8, _1>>, Layout<Shape <_8, _16>, // Thread layout, 16 threads per row Stride< _16, _1>> >; using GmemTiledCopyOaccum = decltype( make_tiled_copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, ElementAccum>{}, GmemLayoutAtomOaccum{}, Layout<Shape < _1, _4>>{})); // Val layout, 4 vals per store using GmemLayoutAtomRotcossin = GmemLayoutAtom; using GmemTiledCopyRotcossin = decltype( make_tiled_copy(Copy_Atom<UniversalCopy<uint64_t>, Element>{}, GmemLayoutAtomRotcossin{}, Layout<Shape < _1, _4>>{})); // Val layout, 4 vals per load using GmemTiledCopyRotcossinCont = decltype( make_tiled_copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, Element>{}, GmemLayoutAtomRotcossin{}, Layout<Shape < _1, _8>>{})); // Val layout, 8 vals per load }; // Is_V_in_regs is an option to reduce smem usage, but will increase register pressue. // No_double_buffer is another option to reduce smem usage, but will slow things down. 
template<int kHeadDim_, int kBlockM_, int kBlockN_, int kNWarps_, int AtomLayoutMSdP_=1, int AtomLayoutNdKV=2, int AtomLayoutMdQ=2, bool Is_V_in_regs_=false, bool No_double_buffer_=false, typename elem_type=cutlass::half_t, typename Base=Flash_kernel_traits<kHeadDim_, kBlockM_, kBlockN_, kNWarps_, elem_type> > struct Flash_bwd_kernel_traits : public Base { using Element = typename Base::Element; using ElementAccum = typename Base::ElementAccum; using index_t = typename Base::index_t; static constexpr bool Has_cp_async = Base::Has_cp_async; using SmemCopyAtom = typename Base::SmemCopyAtom; using SmemCopyAtomTransposed = typename Base::SmemCopyAtomTransposed; static constexpr bool Is_V_in_regs = Is_V_in_regs_; static constexpr bool No_double_buffer = No_double_buffer_; // The number of threads. static constexpr int kNWarps = kNWarps_; static constexpr int kNThreads = kNWarps * 32; static constexpr int kBlockM = kBlockM_; static constexpr int kBlockN = kBlockN_; static constexpr int kHeadDim = kHeadDim_; static_assert(kHeadDim % 32 == 0); static constexpr int kBlockKSmem = kHeadDim % 64 == 0 ? 64 : 32; static constexpr int kBlockKGmem = kHeadDim % 128 == 0 ? 128 : (kHeadDim % 64 == 0 ? 64 : 32); static constexpr int kSwizzle = kBlockKSmem == 32 ? 
2 : 3; static constexpr int AtomLayoutMSdP = AtomLayoutMSdP_; static_assert(kNWarps % AtomLayoutMSdP == 0); static_assert(kNWarps % AtomLayoutNdKV == 0); static_assert(kNWarps % AtomLayoutMdQ == 0); using TiledMmaSdP = TiledMMA< typename Base::MMA_Atom_Arch, Layout<Shape<Int<AtomLayoutMSdP>, Int<kNWarps / AtomLayoutMSdP>, _1>>, Tile<Int<16 * AtomLayoutMSdP>, Int<16 * kNWarps / AtomLayoutMSdP>, _16>>; using TiledMmadKV = TiledMMA< typename Base::MMA_Atom_Arch, Layout<Shape<Int<AtomLayoutNdKV>, Int<kNWarps / AtomLayoutNdKV>, _1>>, Tile<Int<16 * AtomLayoutNdKV>, Int<16 * kNWarps / AtomLayoutNdKV>, _16>>; using TiledMmadQ = TiledMMA< typename Base::MMA_Atom_Arch, Layout<Shape<Int<AtomLayoutMdQ>, Int<kNWarps / AtomLayoutMdQ>, _1>>, // 2x4x1 or 4x2x1 thread group Tile<Int<16 * AtomLayoutMdQ>, Int<16 * kNWarps / AtomLayoutMdQ>, _16>>; using SmemLayoutAtomQdO = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, Layout<Shape<_8, Int<kBlockKSmem>>, Stride<Int<kBlockKSmem>, _1>>{})); using SmemLayoutQdO = decltype(tile_to_shape( SmemLayoutAtomQdO{}, make_shape(Int<kBlockM>{}, Int<kHeadDim>{}))); using SmemLayoutAtomKV = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, Layout<Shape<Int<kBlockM / kNWarps>, Int<kBlockKSmem>>, Stride<Int<kBlockKSmem>, _1>>{})); using SmemLayoutKV = decltype(tile_to_shape( // SmemLayoutAtomQdO{}, SmemLayoutAtomKV{}, make_shape(Int<kBlockN>{}, Int<kHeadDim>{}))); using SmemLayoutKtransposed = decltype( composition(SmemLayoutKV{}, make_layout(Shape<Int<kHeadDim>, Int<kBlockN>>{}, GenRowMajor{}))); using SmemLayoutKtransposedNoSwizzle = decltype(get_nonswizzle_portion(SmemLayoutKtransposed{})); // TODO: generalize to other values of kBlockN // TODO: what should be the Swizzle here? 
3 is faster than 1, and 1 is faster than 2 // static constexpr int kPBlockN = kBlockN; // Temporarily disabling this for hdim 256 on sm86 and sm89 // static_assert(kBlockN >= 64); static_assert(kBlockN >= 32); // TD [2023-03-19]: Idk why kPBlockN = 16 and kSwizzlePdS=3 is the fastest. static constexpr int kPBlockN = kBlockN >= 64 ? 64 : 32; static_assert(kPBlockN == 16 || kPBlockN == 32 || kPBlockN == 64); // static constexpr int kSwizzlePdS = kPBlockN == 16 ? 1 : (kPBlockN == 32 ? 2 : 3); static constexpr int kSwizzlePdS = 3; using SmemLayoutAtomPdS = decltype( composition(Swizzle<kSwizzlePdS, 3, 3>{}, Layout<Shape<Int<kBlockM>, Int<kPBlockN>>, Stride<Int<kPBlockN>, _1>>{})); using SmemLayoutPdS = decltype(tile_to_shape( SmemLayoutAtomPdS{}, make_shape(Int<kBlockM>{}, Int<kBlockN>{}))); using SmemLayoutPdStransposed = decltype( composition(SmemLayoutPdS{}, make_layout(Shape<Int<kBlockN>, Int<kBlockM>>{}, GenRowMajor{}))); using SmemLayoutPdStransposedNoSwizzle = decltype(get_nonswizzle_portion(SmemLayoutPdStransposed{})); using SmemCopyAtomPdS = Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, elem_type>; using SmemLayoutQdOtransposed = decltype( composition(SmemLayoutQdO{}, make_layout(Shape<Int<kHeadDim>, Int<kBlockM>>{}, GenRowMajor{}))); using SmemLayoutQdOtransposedNoSwizzle = decltype(get_nonswizzle_portion(SmemLayoutQdOtransposed{})); using SmemLayoutAtomdKV = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, Layout<Shape<_8, Int<kBlockKSmem>>, Stride<Int<kBlockKSmem>, _1>>{})); using SmemLayoutdKV = decltype(tile_to_shape( SmemLayoutAtomdKV{}, make_shape(Int<kBlockN>{}, Int<kHeadDim>{}))); using SmemCopyAtomdKV = Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, elem_type>; using SmemLayoutAtomdQ = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, Layout<Shape<_8, Int<kBlockKSmem>>, Stride<Int<kBlockKSmem>, _1>>{})); using SmemLayoutdQ = decltype(tile_to_shape( SmemLayoutAtomdQ{}, make_shape(Int<kBlockM>{}, Int<kHeadDim>{}))); using 
SmemCopyAtomdQ = Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, elem_type>; // Double buffer for sQ static constexpr int kSmemQdOSize = size(SmemLayoutQdO{}) * (No_double_buffer ? 2 : 3) * sizeof(Element); static constexpr int kSmemKVSize = size(SmemLayoutKV{}) * 2 * sizeof(Element); static constexpr int kSmemdSSize = size(SmemLayoutPdS{}) * sizeof(Element); static constexpr int kSmemPSize = size(SmemLayoutPdS{}) * sizeof(Element); static constexpr int kSmemdQSize = size(SmemLayoutdQ{}) * sizeof(Element); static constexpr int kSmemSize = kSmemQdOSize + (!Is_V_in_regs ? kSmemKVSize + kSmemdSSize + std::max(kSmemPSize, kSmemdQSize) : std::max(kSmemKVSize, kSmemKVSize / 2 + kSmemdSSize + std::max(kSmemPSize, kSmemdQSize))); static constexpr int kSmemSize1colblock = kSmemQdOSize + (!Is_V_in_regs ? kSmemKVSize + kSmemdSSize + kSmemPSize : std::max(kSmemKVSize, kSmemKVSize / 2 + kSmemdSSize + kSmemPSize)); static constexpr int kGmemElemsPerLoad = sizeof(cute::uint128_t) / sizeof(Element); static_assert(kHeadDim % kGmemElemsPerLoad == 0, "kHeadDim must be a multiple of kGmemElemsPerLoad"); // Using kBlockKSmem instead of kHeadDim here to avoid bank conflicts, but doesn't seem // to affect speed in practice. static constexpr int kGmemThreadsPerRow = kBlockKSmem / kGmemElemsPerLoad; static_assert(kNThreads % kGmemThreadsPerRow == 0, "kNThreads must be a multiple of kGmemThreadsPerRow"); using GmemLayoutAtom = Layout<Shape <Int<kNThreads / kGmemThreadsPerRow>, Int<kGmemThreadsPerRow>>, Stride<Int<kGmemThreadsPerRow>, _1>>; // We use CACHEGLOBAL instead of CACHEALWAYS for both Q and K/V, since we won't be reading // from the same address by the same threadblock. This is slightly faster. 
using Gmem_copy_struct = std::conditional_t< Has_cp_async, SM80_CP_ASYNC_CACHEGLOBAL<cute::uint128_t>, AutoVectorizingCopyWithAssumedAlignment<128> >; using GmemTiledCopyQKV = decltype( make_tiled_copy(Copy_Atom<Gmem_copy_struct, elem_type>{}, GmemLayoutAtom{}, Layout<Shape<_1, _8>>{})); // Val layout, 8 vals per read using GmemTiledCopydO = decltype( make_tiled_copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, elem_type>{}, GmemLayoutAtom{}, Layout<Shape < _1, _8>>{})); // Val layout, 8 vals per store using GmemTiledCopydKV = decltype( make_tiled_copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, elem_type>{}, GmemLayoutAtom{}, Layout<Shape < _1, _8>>{})); // Val layout, 8 vals per store using GmemTiledCopydQ = decltype( make_tiled_copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, elem_type>{}, GmemLayoutAtom{}, Layout<Shape < _1, _8>>{})); // Val layout, 8 vals per store using GmemLayoutAtomdQaccum = std::conditional_t< kBlockKSmem == 32, Layout<Shape <_32, _8>, // Thread layout, 8 threads per row Stride< _8, _1>>, Layout<Shape <_16, _16>, // Thread layout, 16 threads per row Stride< _16, _1>> >; using GmemTiledCopydQaccum = decltype( make_tiled_copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, ElementAccum>{}, GmemLayoutAtomdQaccum{}, Layout<Shape < _1, _4>>{})); // Val layout, 4 vals per store using GmemTiledCopydQaccumAtomicAdd = decltype( make_tiled_copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, ElementAccum>{}, Layout<Shape <_8, _32>, // Thread layout, 8 threads per row Stride<_32, _1>>{}, Layout<Shape < _1, _1>>{})); // Val layout, 1 val per store }; ////////////////////////////////////////////////////////////////////////////////////////////////////
candle/candle-flash-attn/kernels/kernel_traits.h/0
{ "file_path": "candle/candle-flash-attn/kernels/kernel_traits.h", "repo_id": "candle", "token_count": 7903 }
46
#include "binary_op_macros.cuh" #include<stdint.h> #if __CUDA_ARCH__ >= 800 BINARY_OP(__nv_bfloat16, badd_bf16, x + y) BINARY_OP(__nv_bfloat16, bdiv_bf16, x / y) BINARY_OP(__nv_bfloat16, bmul_bf16, x * y) BINARY_OP(__nv_bfloat16, bsub_bf16, x - y) BINARY_OP(__nv_bfloat16, bmaximum_bf16, maxg(x, y)) BINARY_OP(__nv_bfloat16, bminimum_bf16, ming(x, y)) BINARY_OP_OUT(__nv_bfloat16, uint8_t, eq_bf16, x == y) BINARY_OP_OUT(__nv_bfloat16, uint8_t, ne_bf16, x != y) BINARY_OP_OUT(__nv_bfloat16, uint8_t, lt_bf16, x < y) BINARY_OP_OUT(__nv_bfloat16, uint8_t, le_bf16, x <= y) BINARY_OP_OUT(__nv_bfloat16, uint8_t, gt_bf16, x > y) BINARY_OP_OUT(__nv_bfloat16, uint8_t, ge_bf16, x >= y) #define F8E4M3_TO_FLOAT(x) __half2float(__nv_cvt_fp8_to_halfraw(x.__x, __NV_E4M3)) BINARY_OP(__nv_fp8_e4m3, badd_f8_e4m3, __nv_fp8_e4m3(F8E4M3_TO_FLOAT(x) + F8E4M3_TO_FLOAT(y))) BINARY_OP(__nv_fp8_e4m3, bdiv_f8_e4m3, __nv_fp8_e4m3(F8E4M3_TO_FLOAT(x) / F8E4M3_TO_FLOAT(y))) BINARY_OP(__nv_fp8_e4m3, bmul_f8_e4m3, __nv_fp8_e4m3(F8E4M3_TO_FLOAT(x) * F8E4M3_TO_FLOAT(y))) BINARY_OP(__nv_fp8_e4m3, bsub_f8_e4m3, __nv_fp8_e4m3(F8E4M3_TO_FLOAT(x) - F8E4M3_TO_FLOAT(y))) BINARY_OP(__nv_fp8_e4m3, bmaximum_f8_e4m3, maxg(x, y)) BINARY_OP(__nv_fp8_e4m3, bminimum_f8_e4m3, ming(x, y)) BINARY_OP_OUT(__nv_fp8_e4m3, uint8_t, eq_f8_e4m3, F8E4M3_TO_FLOAT(x) == F8E4M3_TO_FLOAT(y)) BINARY_OP_OUT(__nv_fp8_e4m3, uint8_t, ne_f8_e4m3, F8E4M3_TO_FLOAT(x) != F8E4M3_TO_FLOAT(y)) BINARY_OP_OUT(__nv_fp8_e4m3, uint8_t, lt_f8_e4m3, F8E4M3_TO_FLOAT(x) < F8E4M3_TO_FLOAT(y)) BINARY_OP_OUT(__nv_fp8_e4m3, uint8_t, le_f8_e4m3, F8E4M3_TO_FLOAT(x) <= F8E4M3_TO_FLOAT(y)) BINARY_OP_OUT(__nv_fp8_e4m3, uint8_t, gt_f8_e4m3, F8E4M3_TO_FLOAT(x) > F8E4M3_TO_FLOAT(y)) BINARY_OP_OUT(__nv_fp8_e4m3, uint8_t, ge_f8_e4m3, F8E4M3_TO_FLOAT(x) >= F8E4M3_TO_FLOAT(y)) #endif #if __CUDA_ARCH__ >= 530 BINARY_OP(__half, badd_f16, x + y) BINARY_OP(__half, bdiv_f16, x / y) BINARY_OP(__half, bmul_f16, x * y) BINARY_OP(__half, bsub_f16, x - y) BINARY_OP(__half, 
bmaximum_f16, maxg(x, y)) BINARY_OP(__half, bminimum_f16, ming(x, y)) BINARY_OP_OUT(__half, uint8_t, eq_f16, x == y) BINARY_OP_OUT(__half, uint8_t, ne_f16, x != y) BINARY_OP_OUT(__half, uint8_t, lt_f16, x < y) BINARY_OP_OUT(__half, uint8_t, le_f16, x <= y) BINARY_OP_OUT(__half, uint8_t, gt_f16, x > y) BINARY_OP_OUT(__half, uint8_t, ge_f16, x >= y) #endif BINARY_OP(float, badd_f32, x + y) BINARY_OP(double, badd_f64, x + y); BINARY_OP(uint8_t, badd_u8, x + y); BINARY_OP(uint32_t, badd_u32, x + y); BINARY_OP(int64_t, badd_i64, x + y); BINARY_OP(float, bdiv_f32, x / y) BINARY_OP(double, bdiv_f64, x / y); BINARY_OP(uint8_t, bdiv_u8, x / y); BINARY_OP(uint32_t, bdiv_u32, x / y); BINARY_OP(int64_t, bdiv_i64, x / y); BINARY_OP(float, bmul_f32, x * y) BINARY_OP(double, bmul_f64, x * y); BINARY_OP(uint8_t, bmul_u8, x * y); BINARY_OP(uint32_t, bmul_u32, x * y); BINARY_OP(int64_t, bmul_i64, x * y); BINARY_OP(float, bsub_f32, x - y) BINARY_OP(double, bsub_f64, x - y); BINARY_OP(uint8_t, bsub_u8, x - y); BINARY_OP(uint32_t, bsub_u32, x - y); BINARY_OP(int64_t, bsub_i64, x - y); BINARY_OP(float, bminimum_f32, ming(x, y)); BINARY_OP(double, bminimum_f64, ming(x, y)); BINARY_OP(uint8_t, bminimum_u8, ming(x, y)); BINARY_OP(uint32_t, bminimum_u32, ming(x, y)); BINARY_OP(int64_t, bminimum_i64, ming(x, y)); BINARY_OP(float, bmaximum_f32, maxg(x, y)); BINARY_OP(double, bmaximum_f64, maxg(x, y)); BINARY_OP(uint8_t, bmaximum_u8, maxg(x, y)); BINARY_OP(uint32_t, bmaximum_u32, maxg(x, y)); BINARY_OP(int64_t, bmaximum_i64, maxg(x, y)); BINARY_OP_OUT(float, uint8_t, eq_f32, x == y) BINARY_OP_OUT(double, uint8_t, eq_f64, x == y) BINARY_OP_OUT(uint8_t, uint8_t, eq_u8, x == y) BINARY_OP_OUT(uint32_t, uint8_t, eq_u32, x == y) BINARY_OP_OUT(int64_t, uint8_t, eq_i64, x == y) BINARY_OP_OUT(float, uint8_t, ne_f32, x != y) BINARY_OP_OUT(double, uint8_t, ne_f64, x != y) BINARY_OP_OUT(uint8_t, uint8_t, ne_u8, x != y) BINARY_OP_OUT(uint32_t, uint8_t, ne_u32, x != y) BINARY_OP_OUT(int64_t, uint8_t, 
ne_i64, x != y) BINARY_OP_OUT(float, uint8_t, lt_f32, x < y) BINARY_OP_OUT(double, uint8_t, lt_f64, x < y) BINARY_OP_OUT(uint8_t, uint8_t, lt_u8, x < y) BINARY_OP_OUT(uint32_t, uint8_t, lt_u32, x < y) BINARY_OP_OUT(int64_t, uint8_t, lt_i64, x < y) BINARY_OP_OUT(float, uint8_t, le_f32, x <= y) BINARY_OP_OUT(double, uint8_t, le_f64, x <= y) BINARY_OP_OUT(uint8_t, uint8_t, le_u8, x <= y) BINARY_OP_OUT(uint32_t, uint8_t, le_u32, x <= y) BINARY_OP_OUT(int64_t, uint8_t, le_i64, x <= y) BINARY_OP_OUT(float, uint8_t, gt_f32, x > y) BINARY_OP_OUT(double, uint8_t, gt_f64, x > y) BINARY_OP_OUT(uint8_t, uint8_t, gt_u8, x > y) BINARY_OP_OUT(uint32_t, uint8_t, gt_u32, x > y) BINARY_OP_OUT(int64_t, uint8_t, gt_i64, x > y) BINARY_OP_OUT(float, uint8_t, ge_f32, x >= y) BINARY_OP_OUT(double, uint8_t, ge_f64, x >= y) BINARY_OP_OUT(uint8_t, uint8_t, ge_u8, x >= y) BINARY_OP_OUT(uint32_t, uint8_t, ge_u32, x >= y) BINARY_OP_OUT(int64_t, uint8_t, ge_i64, x >= y)
candle/candle-kernels/src/binary.cu/0
{ "file_path": "candle/candle-kernels/src/binary.cu", "repo_id": "candle", "token_count": 2950 }
47
# candle-metal-kernels

This crate contains the Metal kernels used by candle.
candle/candle-metal-kernels/README.md/0
{ "file_path": "candle/candle-metal-kernels/README.md", "repo_id": "candle", "token_count": 18 }
48
// Imported from https://github.com/ggerganov/llama.cpp/blob/master/ggml-metal.metal
//
// Row-wise argsort: for each row of `x` (length `ncols`), writes into `dst` the
// column indices ordered so that x[dst[0]] <= x[dst[1]] <= ... (or >= for the
// descending variant). The sort runs entirely in threadgroup memory using a
// bitonic sorting network, which requires the working width `ncols_pad` to be a
// power of two (>= ncols); indices >= ncols act as padding and always compare
// as "greater", so they sink to the end and are dropped on write-out.
#include <metal_stdlib>
using namespace metal;

#define SWAP(x, y) { auto tmp = (x); (x) = (y); (y) = tmp; }

#define SORT_ASC 1
#define SORT_DESC 0

// One threadgroup sorts one row; thread `col` owns one slot of the index array.
// NOTE(review): the early `return` below means threads with col >= ncols_pad
// never reach the barriers — this assumes the dispatch launches exactly
// ncols_pad threads per threadgroup (so no thread takes that exit while others
// barrier); confirm against the host-side dispatch code.
template<int order, typename T>
METAL_FUNC void argsort(
        device const T        * x,
        device uint32_t       * dst,
        constant int64_t      & ncols,
        constant int64_t      & ncols_pad,
        threadgroup uint32_t  * shared_values [[threadgroup(0)]],
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint3 tpitg[[thread_position_in_threadgroup]]) {
    int col = tpitg[0];
    int row = tgpig[1];

    if (col >= ncols_pad) return;

    device const T        * x_row   = x + row * ncols;
    threadgroup uint32_t  * dst_row = shared_values;

    // initialize indices
    dst_row[col] = col;

    threadgroup_barrier(mem_flags::mem_threadgroup);

    // Standard bitonic merge network: k is the size of the bitonic sequences
    // being merged, j the compare-exchange distance within each merge step.
    for (int k = 2; k <= ncols_pad; k *= 2) {
        for (int j = k / 2; j > 0; j /= 2) {
            int ixj = col ^ j;
            if (ixj > col) {
                // (col & k) selects the merge direction for this half of the
                // sequence; padding indices (>= ncols) always lose so they end
                // up at the tail.
                if ((col & k) == 0) {
                    if (dst_row[col] >= ncols ||
                        (dst_row[ixj] < ncols && (order == SORT_ASC ?
                            x_row[dst_row[col]] > x_row[dst_row[ixj]] :
                            x_row[dst_row[col]] < x_row[dst_row[ixj]]))
                    ) {
                        SWAP(dst_row[col], dst_row[ixj]);
                    }
                } else {
                    if (dst_row[ixj] >= ncols ||
                        (dst_row[col] < ncols && (order == SORT_ASC ?
                            x_row[dst_row[col]] < x_row[dst_row[ixj]] :
                            x_row[dst_row[col]] > x_row[dst_row[ixj]]))
                    ) {
                        SWAP(dst_row[col], dst_row[ixj]);
                    }
                }
            }
            threadgroup_barrier(mem_flags::mem_threadgroup);
        }
    }

    // copy the result to dst without the padding
    if (col < ncols) {
        dst[row * ncols + col] = dst_row[col];
    }
}

// Instantiates the ascending/descending entry points for one element type;
// RUST_T is the dtype suffix used by the Rust caller to look the kernel up.
#define ARGSORT(T, RUST_T)                                                        \
kernel void asort_asc_##RUST_T(                                                   \
    device const T        * x,                                                    \
    device uint32_t       * dst,                                                  \
    constant int64_t      & ncols,                                                \
    constant int64_t      & ncols_pad,                                            \
    threadgroup uint32_t  * shared_values [[threadgroup(0)]],                     \
    uint3 tgpig[[threadgroup_position_in_grid]],                                  \
    uint3 tpitg[[thread_position_in_threadgroup]]                                 \
) {                                                                               \
    argsort<SORT_ASC, T>(x, dst, ncols, ncols_pad, shared_values, tgpig, tpitg);  \
}                                                                                 \
kernel void asort_desc_##RUST_T(                                                  \
    device const T        * x,                                                    \
    device uint32_t       * dst,                                                  \
    constant int64_t      & ncols,                                                \
    constant int64_t      & ncols_pad,                                            \
    threadgroup uint32_t  * shared_values [[threadgroup(0)]],                     \
    uint3 tgpig[[threadgroup_position_in_grid]],                                  \
    uint3 tpitg[[thread_position_in_threadgroup]]                                 \
) {                                                                               \
    argsort<SORT_DESC, T>(x, dst, ncols, ncols_pad, shared_values, tgpig, tpitg); \
}                                                                                 \

ARGSORT(float, f32)
ARGSORT(half, f16)
ARGSORT(uint8_t, u8)
ARGSORT(uint32_t, u32)

// i64 requires Metal 2.2; bfloat requires hardware/compiler bfloat support.
#if __METAL_VERSION__ >= 220
ARGSORT(int64_t, i64)
#endif
#if defined(__HAVE_BFLOAT__)
ARGSORT(bfloat, bf16)
#endif
{ "file_path": "candle/candle-metal-kernels/src/sort.metal", "repo_id": "candle", "token_count": 1748 }
49
pub(crate) mod conv;
pub(crate) mod layer_norm;
pub(crate) mod softmax;

use candle::{Device, Result};

/// Device helpers shared by the benchmark binaries.
pub(crate) trait BenchDevice {
    /// Block until all work queued on the device has completed, so timings
    /// measure actual execution rather than kernel launch.
    fn sync(&self) -> Result<()>;

    /// Prefix `name` with an identifier for the active backend
    /// (cpu/accelerate/mkl, cuda, or metal) so benchmark results are
    /// distinguishable across builds.
    fn bench_name<S: Into<String>>(&self, name: S) -> String;
}

impl BenchDevice for Device {
    fn sync(&self) -> Result<()> {
        match self {
            // CPU work is synchronous: nothing to wait for.
            Device::Cpu => Ok(()),
            Device::Cuda(device) => {
                #[cfg(feature = "cuda")]
                return Ok(device.synchronize()?);
                // Only reachable when a CUDA device exists but the binary was
                // built without the `cuda` feature — a build configuration bug.
                #[cfg(not(feature = "cuda"))]
                panic!("Cuda device without cuda feature enabled: {:?}", device)
            }
            Device::Metal(device) => {
                #[cfg(feature = "metal")]
                return Ok(device.wait_until_completed()?);
                #[cfg(not(feature = "metal"))]
                panic!("Metal device without metal feature enabled: {:?}", device)
            }
        }
    }

    fn bench_name<S: Into<String>>(&self, name: S) -> String {
        match self {
            Device::Cpu => {
                // The CPU label reflects which BLAS backend was compiled in.
                let cpu_type = if cfg!(feature = "accelerate") {
                    "accelerate"
                } else if cfg!(feature = "mkl") {
                    "mkl"
                } else {
                    "cpu"
                };
                format!("{}_{}", cpu_type, name.into())
            }
            Device::Cuda(_) => format!("cuda_{}", name.into()),
            Device::Metal(_) => format!("metal_{}", name.into()),
        }
    }
}

/// The list of devices each benchmark is run on.
struct BenchDeviceHandler {
    devices: Vec<Device>,
}

impl BenchDeviceHandler {
    /// Build the device list: the accelerated device first when a GPU feature
    /// is enabled (metal takes precedence over cuda), then always the CPU.
    pub fn new() -> Result<Self> {
        let mut devices = Vec::new();
        if cfg!(feature = "metal") {
            devices.push(Device::new_metal(0)?);
        } else if cfg!(feature = "cuda") {
            devices.push(Device::new_cuda(0)?);
        }
        devices.push(Device::Cpu);
        Ok(Self { devices })
    }
}
candle/candle-nn/benches/benchmarks/mod.rs/0
{ "file_path": "candle/candle-nn/benches/benchmarks/mod.rs", "repo_id": "candle", "token_count": 1006 }
50
//! Loss Calculations //! use candle::{Result, Tensor}; /// The negative log likelihood loss. /// /// Arguments /// /// * [inp]: The input tensor of dimensions `N, C` where `N` is the batch size and `C` the number /// of categories. This is expected to contain log probabilities. /// * [target]: The ground truth labels as a tensor of u32 of dimension `N`. /// /// The resulting tensor is a scalar containing the average value over the batch. pub fn nll(inp: &Tensor, target: &Tensor) -> Result<Tensor> { let b_sz = match target.dims() { &[b_sz] => b_sz, dims => candle::bail!("the target tensor should have a single dimension ({dims:?})"), }; match inp.dims() { &[inp_b_sz, _] => { if inp_b_sz != b_sz { candle::bail!("batch size mismatch between inp ({inp_b_sz}) and target ({b_sz})") } } dims => candle::bail!("the target tensor should have two dimensions ({dims:?})"), } inp.gather(&target.unsqueeze(1)?, 1)? .sum_all()? .affine(-1f64 / b_sz as f64, 0.) } /// The cross-entropy loss. /// /// Arguments /// /// * [inp]: The input tensor of dimensions `N, C` where `N` is the batch size and `C` the number /// of categories. This is expected to raw logits. /// * [target]: The ground truth labels as a tensor of u32 of dimension `N`. /// /// The resulting tensor is a scalar containing the average value over the batch. pub fn cross_entropy(inp: &Tensor, target: &Tensor) -> Result<Tensor> { if inp.rank() != 2 { candle::bail!("cross_entropy expects an input tensor of rank 2") } let inp = crate::ops::log_softmax(inp, 1)?; nll(&inp, target) } /// The mean squared error loss. pub fn mse(inp: &Tensor, target: &Tensor) -> Result<Tensor> { (inp - target)?.sqr()?.mean_all() } /// The binary cross-entropy with logit loss. /// /// Arguments /// /// * [inp]: The input tensor of dimensions `N, C` where `N` is the batch size and `C` the number /// of categories. This is expected to raw logits. 
/// * [target]: The ground truth labels as a tensor of u32 of dimension `N, C` where `N` is the batch size and `C` the number /// of categories. /// /// The resulting tensor is a scalar containing the average value over the batch. pub fn binary_cross_entropy_with_logit(inp: &Tensor, target: &Tensor) -> Result<Tensor> { let inp = crate::ops::sigmoid(inp)?; let left_side = target * inp.log()?; let right_side = (target.affine(-1., 1.))? * inp.affine(-1., 1.)?.log()?; let loss = left_side? + right_side?; let loss = loss?.neg()?.mean_all()?; Ok(loss) }
candle/candle-nn/src/loss.rs/0
{ "file_path": "candle/candle-nn/src/loss.rs", "repo_id": "candle", "token_count": 1021 }
51
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::test_utils::{to_vec0_round, to_vec2_round}; use anyhow::Result; use candle::{DType, Device, Tensor, Var}; use candle_nn::{AdamW, Linear, Module, Optimizer, ParamsAdamW, SGD}; #[test] fn sgd_optim() -> Result<()> { let x = Var::new(0f32, &Device::Cpu)?; let mut sgd = SGD::new(vec![x.clone()], 0.1)?; let xt = x.as_tensor(); for _step in 0..100 { let loss = ((xt - 4.2)? * (xt - 4.2)?)?; sgd.backward_step(&loss)? } assert_eq!(x.to_scalar::<f32>()?, 4.199999); Ok(()) } /* The results of this test have been checked against the following PyTorch code. import torch from torch import optim w_gen = torch.tensor([[3., 1.]]) b_gen = torch.tensor([-2.]) sample_xs = torch.tensor([[2., 1.], [7., 4.], [-4., 12.], [5., 8.]]) sample_ys = sample_xs.matmul(w_gen.t()) + b_gen m = torch.nn.Linear(2, 1) with torch.no_grad(): m.weight.zero_() m.bias.zero_() optimizer = optim.SGD(m.parameters(), lr=0.004, momentum=0.) for _step in range(1000): optimizer.zero_grad() ys = m(sample_xs) loss = ((ys - sample_ys)**2).sum() loss.backward() optimizer.step() print(m.weight) print(m.bias) */ #[test] fn sgd_linear_regression() -> Result<()> { // Generate some linear data, y = 3.x1 + x2 - 2. let w_gen = Tensor::new(&[[3f32, 1.]], &Device::Cpu)?; let b_gen = Tensor::new(-2f32, &Device::Cpu)?; let gen = Linear::new(w_gen, Some(b_gen)); let sample_xs = Tensor::new(&[[2f32, 1.], [7., 4.], [-4., 12.], [5., 8.]], &Device::Cpu)?; let sample_ys = gen.forward(&sample_xs)?; // Now use backprop to run a linear regression between samples and get the coefficients back. 
let w = Var::new(&[[0f32, 0.]], &Device::Cpu)?; let b = Var::new(0f32, &Device::Cpu)?; let mut sgd = SGD::new(vec![w.clone(), b.clone()], 0.004)?; let lin = Linear::new(w.as_tensor().clone(), Some(b.as_tensor().clone())); for _step in 0..1000 { let ys = lin.forward(&sample_xs)?; let loss = ys.sub(&sample_ys)?.sqr()?.sum_all()?; sgd.backward_step(&loss)?; } assert_eq!(w.to_vec2::<f32>()?, &[[2.9983196, 0.99790204]]); assert_eq!(b.to_scalar::<f32>()?, -1.9796902); Ok(()) } /* The following test returns the same values as the PyTorch code below. import torch from torch import optim w_gen = torch.tensor([[3., 1.]]) b_gen = torch.tensor([-2.]) sample_xs = torch.tensor([[2., 1.], [7., 4.], [-4., 12.], [5., 8.]]) sample_ys = sample_xs.matmul(w_gen.t()) + b_gen m = torch.nn.Linear(2, 1) with torch.no_grad(): m.weight.zero_() m.bias.zero_() optimizer = optim.AdamW(m.parameters(), lr=0.1) for _step in range(100): optimizer.zero_grad() ys = m(sample_xs) loss = ((ys - sample_ys)**2).sum() loss.backward() optimizer.step() print(m.weight) print(m.bias) */ #[test] fn adamw_linear_regression() -> Result<()> { let w_gen = Tensor::new(&[[3f32, 1.]], &Device::Cpu)?; let b_gen = Tensor::new(-2f32, &Device::Cpu)?; let gen = Linear::new(w_gen, Some(b_gen)); let sample_xs = Tensor::new(&[[2f32, 1.], [7., 4.], [-4., 12.], [5., 8.]], &Device::Cpu)?; let sample_ys = gen.forward(&sample_xs)?; // Now use backprop to run a linear regression between samples and get the coefficients back. 
let w = Var::new(&[[0f32, 0.]], &Device::Cpu)?; let b = Var::new(0f32, &Device::Cpu)?; let params = ParamsAdamW { lr: 0.1, ..Default::default() }; let mut opt = AdamW::new(vec![w.clone(), b.clone()], params)?; let lin = Linear::new(w.as_tensor().clone(), Some(b.as_tensor().clone())); for _step in 0..100 { let ys = lin.forward(&sample_xs)?; let loss = ys.sub(&sample_ys)?.sqr()?.sum_all()?; opt.backward_step(&loss)?; } assert_eq!(to_vec2_round(w.as_tensor(), 4)?, &[[2.7257, 0.7097]]); assert_eq!(to_vec0_round(b.as_tensor(), 4)?, 0.7873); Ok(()) } #[test] fn adamw_linear_regression_varmap() -> Result<()> { use candle_nn::Init::Const; // Similar as the previous test but using a VarMap. let w_gen = Tensor::new(&[[3f32, 1.]], &Device::Cpu)?; let b_gen = Tensor::new(-2f32, &Device::Cpu)?; let gen = Linear::new(w_gen, Some(b_gen)); let sample_xs = Tensor::new(&[[2f32, 1.], [7., 4.], [-4., 12.], [5., 8.]], &Device::Cpu)?; let sample_ys = gen.forward(&sample_xs)?; let mut var_map = candle_nn::VarMap::new(); let w = var_map.get((1, 2), "w", Const(0.), DType::F32, &Device::Cpu)?; let b = var_map.get((), "b", Const(0.), DType::F32, &Device::Cpu)?; let params = ParamsAdamW { lr: 0.1, ..Default::default() }; let mut opt = AdamW::new(var_map.all_vars(), params)?; let lin = Linear::new(w, Some(b)); for _step in 0..100 { let ys = lin.forward(&sample_xs)?; let loss = ys.sub(&sample_ys)?.sqr()?.sum_all()?; opt.backward_step(&loss)?; } assert_eq!(to_vec2_round(lin.weight(), 4)?, &[[2.7257, 0.7097]]); assert_eq!(to_vec0_round(lin.bias().unwrap(), 4)?, 0.7873); var_map.set([("w", Tensor::zeros((1, 2), DType::F32, &Device::Cpu)?)].into_iter())?; var_map.set([("b", Tensor::ones((), DType::F32, &Device::Cpu)?)].into_iter())?; assert_eq!(to_vec2_round(lin.weight(), 4)?, &[[0., 0.]]); assert_eq!(to_vec0_round(lin.bias().unwrap(), 4)?, 1.); Ok(()) }
candle/candle-nn/tests/optim.rs/0
{ "file_path": "candle/candle-nn/tests/optim.rs", "repo_id": "candle", "token_count": 2568 }
52
# Quantization demo for the intfloat/e5-small-v2 embedding model:
# 1. load the safetensors weights into candle's Python BERT implementation,
# 2. compare its pooled embeddings against the HF transformers reference,
# 3. quantize the weights, save them as GGUF, reload, and compare again.
from candle.utils import load_safetensors, save_gguf, load_gguf
from candle.models.bert import BertModel, Config
import json
from candle import Tensor
from tqdm import tqdm
from dataclasses import fields
import os
import time  # NOTE(review): unused import

from huggingface_hub import hf_hub_download
from transformers import BertTokenizer, AutoModel
import torch

if __name__ == "__main__":
    model_name = "intfloat/e5-small-v2"
    model_file = hf_hub_download(repo_id=model_name, filename="model.safetensors")
    config_file = hf_hub_download(repo_id=model_name, filename="config.json")

    tensors = load_safetensors(model_file)
    config = Config()
    # Copy only the config keys that the candle Config dataclass declares.
    with open(config_file, "r") as f:
        raw_config = json.load(f)
        for field in fields(config):
            if field.name in raw_config:
                setattr(config, field.name, raw_config[field.name])

    # Load the model
    model = BertModel(config)
    model.load_state_dict(tensors)

    hf_model = AutoModel.from_pretrained(model_name)
    tokenizer = BertTokenizer.from_pretrained(model_name)

    sentences = [
        "The cat sits outside",
        "A man is playing guitar",
        "I love pasta",
        "The new movie is awesome",
        "The cat plays in the garden",
        "A woman watches TV",
        "The new movie is so great",
        "Do you like pizza?",
    ]

    def average_pool(last_hidden_states: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
        """Average the hidden states according to the attention mask"""
        # Zero out padded positions before summing, then divide by the number
        # of real (unmasked) tokens per sequence.
        last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0)
        return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]

    tokenized = tokenizer(sentences, padding=True)
    tokens = Tensor(tokenized["input_ids"])
    token_type_ids = Tensor(tokenized["token_type_ids"])
    attention_mask = Tensor(tokenized["attention_mask"])

    encoder_out, _ = model.forward(tokens, token_type_ids, attention_mask=attention_mask)

    hf_tokenized = tokenizer(sentences, padding=True, return_tensors="pt")
    hf_result = hf_model(**hf_tokenized)["last_hidden_state"]

    # Compare pooled embeddings: candle output is converted to a torch tensor
    # so both go through the same pooling code.
    hf_pooled = average_pool(hf_result, hf_tokenized["attention_mask"])
    candle_pooled = average_pool(torch.tensor(encoder_out.values()), hf_tokenized["attention_mask"])

    loss = torch.nn.L1Loss()
    error = loss(hf_pooled, candle_pooled).mean().item()
    print(f"Mean error between torch-reference and candle: {error}")

    # Quantize all attention 'weights'
    # NOTE(review): the progress-bar label says "5-Bit" but three schemes are
    # actually used below (q4k / q5_0 / q8_0) depending on the tensor.
    quantized_tensors = {}
    for name, tensor in tqdm(tensors.items(), desc="Quantizing tensors to 5-Bit"):
        if name.endswith("weight") and ("attention" in name or "intermediate" in name or "output" in name):
            # check if the tensor is k-quantizable (k-quants need the last dim
            # to be a multiple of the 256-element superblock)
            if tensor.shape[-1] % 256 == 0:
                new_tensor = tensor.quantize("q4k")
            else:
                new_tensor = tensor.quantize("q5_0")
            quantized_tensors[name] = new_tensor
        else:
            quantized_tensors[name] = tensor.quantize("q8_0")

    print(f"Saving quantized tensors")
    # Remove all None values from the config
    config_to_save = {k: v for k, v in config.__dict__.items() if v is not None}
    # Save the model
    quantized_model_file = "e5_small.gguf"
    save_gguf(quantized_model_file, quantized_tensors, config_to_save)

    file_size_mb = os.path.getsize(model_file) / 1024 / 1024
    file_size_mb_compressed = os.path.getsize(quantized_model_file) / 1024 / 1024
    print(f"Compressed model from {file_size_mb:.2f} MB to {file_size_mb_compressed:.2f} MB")

    # Load the model from the gguf
    tensors, raw_config = load_gguf(quantized_model_file)
    config = Config()
    for field in fields(config):
        if field.name in raw_config:
            setattr(config, field.name, raw_config[field.name])
    model = BertModel(config)
    # "embeddings.position_ids" is missing in the gguf as it is i64
    model.load_state_dict(tensors, strict=False)

    # Run the model again
    encoder_out_2, pooled_output_2 = model.forward(tokens, token_type_ids)
    encoder_out_2, pooled_output_2 = encoder_out_2.to_device("cpu"), pooled_output_2.to_device("cpu")

    candle_pooled_2 = average_pool(torch.tensor(encoder_out_2.values()), hf_tokenized["attention_mask"])
    error = loss(hf_pooled, candle_pooled_2).mean().item()
    print(f"Mean error between torch-reference and quantized-candle: {error}")
candle/candle-pyo3/e5.py/0
{ "file_path": "candle/candle-pyo3/e5.py", "repo_id": "candle", "token_count": 1778 }
53
import candle
from candle import Tensor

# Dtypes whose subtraction can underflow/wrap; comparisons promote them to i64 first.
_UNSIGNED_DTYPES = set([str(candle.u8), str(candle.u32)])


def _assert_tensor_metadata(
    actual: Tensor,
    expected: Tensor,
    check_device: bool = True,
    check_dtype: bool = True,
    check_layout: bool = True,
    check_stride: bool = False,
):
    """Assert that the selected metadata (device, dtype, shape, stride) of two tensors match."""
    if check_device:
        assert actual.device == expected.device, f"Device mismatch: {actual.device} != {expected.device}"

    if check_dtype:
        assert str(actual.dtype) == str(expected.dtype), f"Dtype mismatch: {actual.dtype} != {expected.dtype}"

    if check_layout:
        assert actual.shape == expected.shape, f"Shape mismatch: {actual.shape} != {expected.shape}"

    if check_stride:
        assert actual.stride == expected.stride, f"Stride mismatch: {actual.stride} != {expected.stride}"


def assert_equal(
    actual: Tensor,
    expected: Tensor,
    check_device: bool = True,
    check_dtype: bool = True,
    check_layout: bool = True,
    check_stride: bool = False,
):
    """
    Asserts that two tensors are exact equals.
    """
    _assert_tensor_metadata(actual, expected, check_device, check_dtype, check_layout, check_stride)
    # Exact equality: the element-wise absolute difference must sum to zero.
    assert (actual - expected).abs().sum_all().values() == 0, f"Tensors mismatch: {actual} != {expected}"


def assert_almost_equal(
    actual: Tensor,
    expected: Tensor,
    rtol=1e-05,
    atol=1e-08,
    check_device: bool = True,
    check_dtype: bool = True,
    check_layout: bool = True,
    check_stride: bool = False,
):
    """
    Asserts, that two tensors are almost equal by performing an element wise comparison of the tensors with a tolerance.

    Computes: |actual - expected| ≤ atol + rtol x |expected|
    """
    _assert_tensor_metadata(actual, expected, check_device, check_dtype, check_layout, check_stride)

    # Secure against overflow of u32 and u8 tensors
    if str(actual.dtype) in _UNSIGNED_DTYPES or str(expected.dtype) in _UNSIGNED_DTYPES:
        actual = actual.to(candle.i64)
        expected = expected.to(candle.i64)

    diff = (actual - expected).abs()

    # Element-wise tolerance, computed in f32 then converted back to the
    # expected tensor's dtype for the comparison.
    threshold = (expected.abs().to_dtype(candle.f32) * rtol + atol).to(expected)

    # Fix: message previously read "was to great" (typo) with a pointless f-string prefix.
    assert (diff <= threshold).sum_all().values() == actual.nelement, "Difference between tensors was too great"
candle/candle-pyo3/py_src/candle/testing/__init__.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/testing/__init__.py", "repo_id": "candle", "token_count": 854 }
54
import candle
from candle import Tensor
from candle.testing import assert_equal, assert_almost_equal
import pytest


@pytest.mark.parametrize("dtype", [candle.f32, candle.f64, candle.f16, candle.u32, candle.u8, candle.i64])
def test_assert_equal_asserts_correctly(dtype: candle.DType):
    """assert_equal passes for identical tensors and raises for any difference, for every dtype."""
    a = Tensor([1, 2, 3]).to(dtype)
    b = Tensor([1, 2, 3]).to(dtype)
    assert_equal(a, b)

    with pytest.raises(AssertionError):
        assert_equal(a, b + 1)


@pytest.mark.parametrize("dtype", [candle.f32, candle.f64, candle.f16, candle.u32, candle.u8, candle.i64])
def test_assert_almost_equal_asserts_correctly(dtype: candle.DType):
    """assert_almost_equal respects atol/rtol: wide tolerances pass, tight ones raise."""
    a = Tensor([1, 2, 3]).to(dtype)
    b = Tensor([1, 2, 3]).to(dtype)
    assert_almost_equal(a, b)

    # An off-by-one difference fails with default tolerances...
    with pytest.raises(AssertionError):
        assert_almost_equal(a, b + 1)

    # ...but passes when either tolerance is large enough to absorb it.
    assert_almost_equal(a, b + 1, atol=20)
    assert_almost_equal(a, b + 1, rtol=20)

    # Tolerances below the difference must still raise.
    with pytest.raises(AssertionError):
        assert_almost_equal(a, b + 1, atol=0.9)

    with pytest.raises(AssertionError):
        assert_almost_equal(a, b + 1, rtol=0.1)
candle/candle-pyo3/tests/bindings/test_testing.py/0
{ "file_path": "candle/candle-pyo3/tests/bindings/test_testing.py", "repo_id": "candle", "token_count": 476 }
55
//! Chinese contrastive Language-Image Pre-Training //! //! Chinese contrastive Language-Image Pre-Training (CLIP) is an architecture trained on //! pairs of images with related texts. //! //! - 💻 [Chinese-CLIP](https://github.com/OFA-Sys/Chinese-CLIP) //! - 💻 [HF](https://github.com/huggingface/transformers/blob/5af7d41e49bbfc8319f462eb45253dcb3863dfb7/src/transformers/models/chinese_clip/modeling_chinese_clip.py) use candle::{DType, Device, IndexOp, Module, Result, Tensor}; use candle_nn as nn; use super::Activation; /// Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For /// positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to /// [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). /// For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models /// with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). 
#[derive(Clone, Debug)]
pub enum PositionEmbeddingType {
    Absolute,
    RelativeKey,
    RelativeKeyQuery,
}

/// Configuration for the Chinese-CLIP text tower (a BERT-style encoder).
#[derive(Clone, Debug)]
pub struct ChineseClipTextConfig {
    pub vocab_size: usize,
    pub hidden_size: usize,
    pub num_hidden_layers: usize,
    pub num_attention_heads: usize,
    pub intermediate_size: usize,
    pub hidden_act: Activation,
    // NOTE(review): f32 here but f64 for attention_probs_dropout_prob — the
    // widths are inconsistent; confirm against the upstream config schema.
    pub hidden_dropout_prob: f32,
    pub attention_probs_dropout_prob: f64,
    pub max_position_embeddings: usize,
    pub type_vocab_size: usize,
    pub initializer_range: f64,
    pub initializer_factor: f64,
    pub layer_norm_eps: f64,
    pub pad_token_id: usize,
    pub position_embedding_type: PositionEmbeddingType,
    pub use_cache: bool,
}

impl Default for ChineseClipTextConfig {
    fn default() -> Self {
        Self {
            vocab_size: 30522,
            hidden_size: 768,
            num_hidden_layers: 12,
            num_attention_heads: 12,
            intermediate_size: 3072,
            hidden_act: Activation::Gelu,
            hidden_dropout_prob: 0.1,
            attention_probs_dropout_prob: 0.1,
            max_position_embeddings: 512,
            type_vocab_size: 2,
            initializer_range: 0.02,
            initializer_factor: 1.0,
            layer_norm_eps: 1e-12,
            pad_token_id: 0,
            position_embedding_type: PositionEmbeddingType::Absolute,
            use_cache: true,
        }
    }
}

impl ChineseClipTextConfig {
    /// [referer](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16/blob/main/config.json)
    pub fn clip_vit_base_patch16() -> Self {
        Self {
            vocab_size: 21128,
            hidden_size: 768,
            num_hidden_layers: 12,
            num_attention_heads: 12,
            intermediate_size: 3072,
            hidden_act: Activation::Gelu,
            hidden_dropout_prob: 0.1,
            attention_probs_dropout_prob: 0.1,
            max_position_embeddings: 512,
            type_vocab_size: 2,
            initializer_range: 0.02,
            initializer_factor: 1.0,
            layer_norm_eps: 1e-12,
            pad_token_id: 0,
            position_embedding_type: PositionEmbeddingType::Absolute,
            use_cache: true,
        }
    }
}

/// Word + position + token-type embeddings, followed by LayerNorm and dropout.
#[derive(Clone, Debug)]
pub struct ChineseClipTextEmbeddings {
    word_embeddings: nn::Embedding,
    position_embeddings: nn::Embedding,
    token_type_embeddings: nn::Embedding,
    layer_norm: nn::LayerNorm,
    dropout: nn::Dropout,
    position_embedding_type: PositionEmbeddingType,
    // Precomputed [1, max_position_embeddings] index tensor.
    position_ids: Tensor,
    // All-zero [1, max_position_embeddings] defaults used when the caller
    // passes no token type ids.
    token_type_ids: Tensor,
}

impl ChineseClipTextEmbeddings {
    pub fn new(var: nn::VarBuilder, config: &ChineseClipTextConfig) -> Result<Self> {
        let word_embeddings = nn::embedding(
            config.vocab_size,
            config.hidden_size,
            var.pp("word_embeddings"),
        )?;
        let position_embeddings = nn::embedding(
            config.max_position_embeddings,
            config.hidden_size,
            var.pp("position_embeddings"),
        )?;
        let token_type_embeddings = nn::embedding(
            config.type_vocab_size,
            config.hidden_size,
            var.pp("token_type_embeddings"),
        )?;
        let layer_norm = nn::layer_norm::<f64>(
            config.hidden_size,
            config.layer_norm_eps,
            var.pp("LayerNorm"),
        )?;
        let dropout = nn::Dropout::new(config.hidden_dropout_prob);
        let position_ids =
            Tensor::arange(0u32, config.max_position_embeddings as u32, var.device())?
                .unsqueeze(0)?;
        let token_type_ids = Tensor::zeros(position_ids.shape(), DType::I64, var.device())?;
        Ok(Self {
            word_embeddings,
            position_embeddings,
            token_type_embeddings,
            layer_norm,
            dropout,
            position_embedding_type: config.position_embedding_type.clone(),
            position_ids,
            token_type_ids,
        })
    }

    /// `xs` holds token ids of shape (batch, seq); only absolute position
    /// embeddings are added — other `position_embedding_type`s fall through
    /// without a positional term.
    fn forward(&self, xs: &Tensor, token_type_ids: Option<&Tensor>) -> Result<Tensor> {
        let (_batch_size, seq_length) = xs.dims2()?;
        let position_ids = (0..seq_length as u32).collect::<Vec<_>>();
        let position_ids = self.position_ids.index_select(
            &Tensor::new(&position_ids[..], self.position_ids.device())?,
            1,
        )?;

        let word_embeddings = self.word_embeddings.forward(xs)?;

        // Fall back to the stored all-zero token type ids, truncated to seq_length.
        let token_type_ids = match token_type_ids {
            Some(token_type_ids) => token_type_ids,
            None => &self.token_type_ids.i((.., 0..seq_length))?,
        };
        let token_type_ids = token_type_ids.expand(xs.shape())?;
        let token_type_embeddings = self.token_type_embeddings.forward(&token_type_ids)?;

        let embeddings = (&word_embeddings + token_type_embeddings)?;
        let embeddings = match self.position_embedding_type {
            PositionEmbeddingType::Absolute => {
                let position_embeddings = self.position_embeddings.forward(&position_ids)?;
                let position_embeddings = position_embeddings.expand(embeddings.shape())?;
                (embeddings + position_embeddings)?
            }
            _ => embeddings,
        };
        let embeddings = self.layer_norm.forward(&embeddings)?;
        // Dropout is called with train = false, i.e. a no-op at inference.
        let embeddings = self.dropout.forward(&embeddings, false)?;
        Ok(embeddings)
    }
}

/// Copied from [`crate::models::bert::BertSelfOutput`] to [`ChineseClipTextSelfOutput`]
#[derive(Clone, Debug)]
struct ChineseClipTextSelfOutput {
    dense: nn::Linear,
    layer_norm: nn::LayerNorm,
    dropout: nn::Dropout,
    span: tracing::Span,
}

impl ChineseClipTextSelfOutput {
    fn new(var: nn::VarBuilder, config: &ChineseClipTextConfig) -> Result<Self> {
        let dense = nn::linear(config.hidden_size, config.hidden_size, var.pp("dense"))?;
        let layer_norm = nn::layer_norm(
            config.hidden_size,
            config.layer_norm_eps,
            var.pp("LayerNorm"),
        )?;
        let dropout = nn::Dropout::new(config.hidden_dropout_prob);
        Ok(Self {
            dense,
            layer_norm,
            dropout,
            span: tracing::span!(tracing::Level::TRACE, "self-out"),
        })
    }

    /// Projection + dropout, then a residual add with `input_tensor` under LayerNorm.
    fn forward(&self, hidden_states: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let hidden_states = self.dense.forward(hidden_states)?;
        let hidden_states = self.dropout.forward(&hidden_states, false)?;
        self.layer_norm.forward(&(hidden_states + input_tensor)?)
    }
}

/// Copied from [`crate::models::bert::BertSelfAttention`] to [`ChineseClipTextSelfAttention`]
#[derive(Clone, Debug)]
struct ChineseClipTextSelfAttention {
    query: nn::Linear,
    key: nn::Linear,
    value: nn::Linear,
    dropout: nn::Dropout,
    num_attention_heads: usize,
    attention_head_size: usize,
    span: tracing::Span,
    span_softmax: tracing::Span,
}

impl ChineseClipTextSelfAttention {
    fn new(var: nn::VarBuilder, config: &ChineseClipTextConfig) -> Result<Self> {
        let attention_head_size = config.hidden_size / config.num_attention_heads;
        let all_head_size = config.num_attention_heads * attention_head_size;
        let dropout = nn::Dropout::new(config.hidden_dropout_prob);
        let hidden_size = config.hidden_size;
        let query = nn::linear(hidden_size, all_head_size, var.pp("query"))?;
        let value = nn::linear(hidden_size, all_head_size, var.pp("value"))?;
        let key = nn::linear(hidden_size, all_head_size, var.pp("key"))?;
        Ok(Self {
            query,
            key,
            value,
            dropout,
            num_attention_heads: config.num_attention_heads,
            attention_head_size,
            span: tracing::span!(tracing::Level::TRACE, "self-attn"),
            span_softmax: tracing::span!(tracing::Level::TRACE, "softmax"),
        })
    }

    /// Reshape (batch, seq, hidden) into (batch, heads, seq, head_size).
    fn transpose_for_scores(&self, xs: &Tensor) -> Result<Tensor> {
        let mut new_x_shape = xs.dims().to_vec();
        new_x_shape.pop();
        new_x_shape.push(self.num_attention_heads);
        new_x_shape.push(self.attention_head_size);
        let xs = xs.reshape(new_x_shape.as_slice())?.transpose(1, 2)?;
        xs.contiguous()
    }

    /// Scaled dot-product self-attention; `attention_mask` is an additive bias
    /// broadcast onto the raw scores before the softmax.
    fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let query_layer = self.query.forward(hidden_states)?;
        let key_layer = self.key.forward(hidden_states)?;
        let value_layer = self.value.forward(hidden_states)?;

        let query_layer = self.transpose_for_scores(&query_layer)?;
        let key_layer = self.transpose_for_scores(&key_layer)?;
        let value_layer = self.transpose_for_scores(&value_layer)?;

        let attention_scores = query_layer.matmul(&key_layer.t()?)?;
        let attention_scores = (attention_scores / (self.attention_head_size as f64).sqrt())?;
        let attention_scores = attention_scores.broadcast_add(attention_mask)?;
        let attention_probs = {
            let _enter_sm = self.span_softmax.enter();
            nn::ops::softmax(&attention_scores, candle::D::Minus1)?
        };
        let attention_probs = self.dropout.forward(&attention_probs, false)?;

        let context_layer = attention_probs.matmul(&value_layer)?;
        let context_layer = context_layer.transpose(1, 2)?.contiguous()?;
        // Merge the (heads, head_size) trailing dims back into hidden_size.
        let context_layer = context_layer.flatten_from(candle::D::Minus2)?;
        Ok(context_layer)
    }
}

/// Copied from [`crate::models::bert::BertAttention`] to [`ChineseClipTextAttention`]
#[derive(Clone, Debug)]
struct ChineseClipTextAttention {
    self_attention: ChineseClipTextSelfAttention,
    self_output: ChineseClipTextSelfOutput,
    span: tracing::Span,
}

impl ChineseClipTextAttention {
    fn new(var: nn::VarBuilder, config: &ChineseClipTextConfig) -> Result<Self> {
        let self_attention = ChineseClipTextSelfAttention::new(var.pp("self"), config)?;
        let self_output = ChineseClipTextSelfOutput::new(var.pp("output"), config)?;
        Ok(Self {
            self_attention,
            self_output,
            span: tracing::span!(tracing::Level::TRACE, "attn"),
        })
    }

    fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let self_outputs = self.self_attention.forward(hidden_states, attention_mask)?;
        let attention_output = self.self_output.forward(&self_outputs, hidden_states)?;
        Ok(attention_output)
    }
}

type HiddenActLayer = Activation;

/// Copied from [`crate::models::bert::BertIntermediate`] to [`ChineseClipTextIntermediate`]
#[derive(Clone, Debug)]
struct ChineseClipTextIntermediate {
    dense: nn::Linear,
    intermediate_act: HiddenActLayer,
    span: tracing::Span,
}

impl ChineseClipTextIntermediate {
    fn new(var: nn::VarBuilder, config: &ChineseClipTextConfig) -> Result<Self> {
        let dense = nn::linear(
            config.hidden_size,
            config.intermediate_size,
            var.pp("dense"),
        )?;
        Ok(Self {
            dense,
            intermediate_act: config.hidden_act,
            span: tracing::span!(tracing::Level::TRACE, "inter"),
        })
    }
}

impl Module for ChineseClipTextIntermediate {
    /// Up-projection to intermediate_size followed by the configured activation.
    fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let hidden_states = self.dense.forward(hidden_states)?;
        let ys = self.intermediate_act.forward(&hidden_states)?;
        Ok(ys)
    }
}

/// Copied from [`crate::models::bert::BertOutput`] to [`ChineseClipTextOutput`]
#[derive(Clone, Debug)]
struct ChineseClipTextOutput {
    dense: nn::Linear,
    layer_norm: nn::LayerNorm,
    dropout: nn::Dropout,
    span: tracing::Span,
}

impl ChineseClipTextOutput {
    fn new(var: nn::VarBuilder, config: &ChineseClipTextConfig) -> Result<Self> {
        let dense = nn::linear(
            config.intermediate_size,
            config.hidden_size,
            var.pp("dense"),
        )?;
        let layer_norm = nn::layer_norm(
            config.hidden_size,
            config.layer_norm_eps,
            var.pp("LayerNorm"),
        )?;
        let dropout = nn::Dropout::new(config.hidden_dropout_prob);
        Ok(Self {
            dense,
            layer_norm,
            dropout,
            span: tracing::span!(tracing::Level::TRACE, "out"),
        })
    }

    /// Down-projection + dropout, then a residual add with `input_tensor` under LayerNorm.
    fn forward(&self, hidden_states: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let hidden_states = self.dense.forward(hidden_states)?;
        let hidden_states = self.dropout.forward(&hidden_states, false)?;
        self.layer_norm.forward(&(hidden_states + input_tensor)?)
    }
}

/// Copied from [`crate::models::bert::BertLayer`] to [`ChineseClipTextLayer`]
#[derive(Clone, Debug)]
struct ChineseClipTextLayer {
    attention: ChineseClipTextAttention,
    intermediate: ChineseClipTextIntermediate,
    output: ChineseClipTextOutput,
    span: tracing::Span,
}

impl ChineseClipTextLayer {
    fn new(var: nn::VarBuilder, config: &ChineseClipTextConfig) -> Result<Self> {
        let attention = ChineseClipTextAttention::new(var.pp("attention"), config)?;
        let intermediate = ChineseClipTextIntermediate::new(var.pp("intermediate"), config)?;
        let output = ChineseClipTextOutput::new(var.pp("output"), config)?;
        Ok(Self {
            attention,
            intermediate,
            output,
            span: tracing::span!(tracing::Level::TRACE, "layer"),
        })
    }

    /// One encoder layer: self-attention followed by the feed-forward block.
    fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let attention_output = self.attention.forward(hidden_states, attention_mask)?;
        // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L523
        let intermediate_output = self.intermediate.forward(&attention_output)?;
        let layer_output = self
            .output
            .forward(&intermediate_output, &attention_output)?;
        Ok(layer_output)
    }
}

/// Tanh activation wrapped as a [`Module`] so it can be stored in the pooler.
#[derive(Clone, Debug)]
struct Tanh;

impl Tanh {
    pub fn new() -> Self {
        Self {}
    }
}

impl Module for Tanh {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        xs.tanh()
    }
}

/// BERT-style pooler: dense + tanh over the first (CLS) token's hidden state.
#[derive(Clone, Debug)]
struct ChineseClipTextPooler {
    dense: nn::Linear,
    activation: Tanh,
}

impl ChineseClipTextPooler {
    pub fn new(var: nn::VarBuilder, config: &ChineseClipTextConfig) -> Result<Self> {
        let dense = nn::linear(config.hidden_size, config.hidden_size, var.pp("dense"))?;
        let activation = Tanh::new();
        Ok(Self { dense, activation })
    }
}

impl Module for ChineseClipTextPooler {
    fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
        // Pool by taking the hidden state of the first token only.
        let first_token_tensor = hidden_states.i((.., 0))?;
        let pooled_output = self.dense.forward(&first_token_tensor)?;
        let pooled_output = self.activation.forward(&pooled_output)?;
        Ok(pooled_output)
    }
}

/// Stack of [`ChineseClipTextLayer`]s applied sequentially.
#[derive(Clone, Debug)]
struct ChineseClipTextEncoder {
    layers: Vec<ChineseClipTextLayer>,
    span: tracing::Span,
}

impl ChineseClipTextEncoder {
    fn new(var: nn::VarBuilder, config: &ChineseClipTextConfig) -> Result<Self> {
        let layers = (0..config.num_hidden_layers)
            .map(|index| ChineseClipTextLayer::new(var.pp(format!("layer.{index}")), config))
            .collect::<Result<Vec<_>>>()?;
        let span = tracing::span!(tracing::Level::TRACE, "encoder");
        Ok(ChineseClipTextEncoder { layers, span })
    }

    fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let mut hidden_states = hidden_states.clone();
        // Use a loop rather than a fold as it's easier to modify when adding debug/...
        for layer in self.layers.iter() {
            hidden_states = layer.forward(&hidden_states, attention_mask)?
        }
        Ok(hidden_states)
    }
}

/// Full Chinese-CLIP text encoder: embeddings → encoder → optional pooler.
#[derive(Clone, Debug)]
pub struct ChineseClipTextTransformer {
    embeddings: ChineseClipTextEmbeddings,
    encoder: ChineseClipTextEncoder,
    pooler: Option<ChineseClipTextPooler>,
    pub device: Device,
    span: tracing::Span,
}

impl ChineseClipTextTransformer {
    pub fn new(var: nn::VarBuilder, config: &ChineseClipTextConfig) -> Result<Self> {
        let embeddings = ChineseClipTextEmbeddings::new(var.pp("embeddings"), config)?;
        let encoder = ChineseClipTextEncoder::new(var.pp("encoder"), config)?;
        // see: https://github.com/huggingface/transformers/blob/e40bb4845e0eefb52ec1e9cac9c2446ab36aef81/src/transformers/models/chinese_clip/modeling_chinese_clip.py#L1362
        // In the original Python version of the code, the pooler is not used, and there are no parameters for the pooler in the weight file.
        let pooler = if var.contains_tensor("pooler") {
            Some(ChineseClipTextPooler::new(var.pp("pooler"), config)?)
        } else {
            None
        };
        Ok(Self {
            embeddings,
            encoder,
            pooler,
            device: var.device().clone(),
            span: tracing::span!(tracing::Level::TRACE, "model"),
        })
    }

    /// Returns the pooled output when a pooler is present, otherwise the raw
    /// hidden state of the first (CLS) token.
    pub fn forward(
        &self,
        input_ids: &Tensor,
        token_type_ids: Option<&Tensor>,
        attention_mask: Option<&Tensor>,
    ) -> Result<Tensor> {
        let _enter = self.span.enter();
        let embedding_output = self.embeddings.forward(input_ids, token_type_ids)?;
        // Default mask: attend to every token.
        let attention_mask = match attention_mask {
            Some(attention_mask) => attention_mask.clone(),
            None => input_ids.ones_like()?,
        };
        let dtype = embedding_output.dtype();
        // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L995
        let attention_mask = get_extended_attention_mask(&attention_mask, dtype)?;
        let encoder_outputs = self.encoder.forward(&embedding_output, &attention_mask)?;
        let encoder_output = encoder_outputs.i((.., 0, ..))?;
        let pooled_output = match &self.pooler {
            Some(pooler) => pooler.forward(&encoder_output)?,
            None => encoder_output,
        };

        Ok(pooled_output)
    }
}

/// Turn a 0/1 keep-mask of rank 2 or 3 into an additive attention bias:
/// kept positions become 0, masked positions become f32::MIN (cast to `dtype`).
fn get_extended_attention_mask(attention_mask: &Tensor, dtype: DType) -> Result<Tensor> {
    let attention_mask = match attention_mask.rank() {
        3 => attention_mask.unsqueeze(1)?,
        2 => attention_mask.unsqueeze(1)?.unsqueeze(1)?,
        _ => candle::bail!("Wrong shape for input_ids or attention_mask"),
    };
    let attention_mask = attention_mask.to_dtype(dtype)?;
    // torch.finfo(dtype).min
    (attention_mask.ones_like()? - &attention_mask)?.broadcast_mul(
        &Tensor::try_from(f32::MIN)?
            .to_device(attention_mask.device())?
            .to_dtype(dtype)?,
    )
}
candle/candle-transformers/src/models/chinese_clip/text_model.rs/0
{ "file_path": "candle/candle-transformers/src/models/chinese_clip/text_model.rs", "repo_id": "candle", "token_count": 9007 }
56
//! Implementation of DistilBert, a distilled version of BERT.
//!
//! See:
//! - ["DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter"](https://arxiv.org/abs/1910.01108)
//!
use super::with_tracing::{layer_norm, linear, LayerNorm, Linear};
use candle::{DType, Device, Result, Tensor};
use candle_nn::{Embedding, Module, VarBuilder};
use serde::Deserialize;

pub const DTYPE: DType = DType::F32;

/// Replace elements of `on_false` with the scalar `on_true` wherever `mask` is set.
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
    let shape = mask.shape();
    let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
    let m = mask.where_cond(&on_true, on_false)?;
    Ok(m)
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HiddenAct {
    Gelu,
    Relu,
}

/// Wraps a [`HiddenAct`] as a [`Module`] with a tracing span.
struct HiddenActLayer {
    act: HiddenAct,
    span: tracing::Span,
}

impl HiddenActLayer {
    fn new(act: HiddenAct) -> Self {
        let span = tracing::span!(tracing::Level::TRACE, "hidden-act");
        Self { act, span }
    }
}

impl Module for HiddenActLayer {
    fn forward(&self, xs: &Tensor) -> candle::Result<Tensor> {
        let _enter = self.span.enter();
        match self.act {
            // https://github.com/huggingface/transformers/blob/cd4584e3c809bb9e1392ccd3fe38b40daba5519a/src/transformers/activations.py#L213
            HiddenAct::Gelu => xs.gelu(),
            HiddenAct::Relu => xs.relu(),
        }
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
pub enum PositionEmbeddingType {
    #[default]
    Absolute,
}

/// DistilBERT configuration, deserializable from the HF `config.json`.
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
    pub vocab_size: usize,
    pub dim: usize,
    n_layers: usize,
    n_heads: usize,
    hidden_dim: usize,
    activation: HiddenAct,
    max_position_embeddings: usize,
    initializer_range: f64,
    pub pad_token_id: usize,
    #[serde(default)]
    position_embedding_type: PositionEmbeddingType,
    #[serde(default)]
    use_cache: bool,
    model_type: Option<String>,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            vocab_size: 30522,
            dim: 768,
            n_layers: 12,
            n_heads: 12,
            hidden_dim: 3072,
            activation: HiddenAct::Gelu,
            max_position_embeddings: 512,
            initializer_range: 0.02,
            pad_token_id: 0,
            position_embedding_type: PositionEmbeddingType::Absolute,
            use_cache: true,
            model_type: Some("distilbert".to_string()),
        }
    }
}

/// Word + absolute position embeddings with LayerNorm. DistilBERT has no
/// token-type embeddings.
struct Embeddings {
    word_embeddings: Embedding,
    position_embeddings: Embedding,
    layer_norm: LayerNorm,
    span: tracing::Span,
}

impl Embeddings {
    fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let word_embeddings =
            candle_nn::embedding(config.vocab_size, config.dim, vb.pp("word_embeddings"))?;
        let position_embeddings = candle_nn::embedding(
            config.max_position_embeddings,
            config.dim,
            vb.pp("position_embeddings"),
        )?;
        let layer_norm = layer_norm(config.dim, 1e-12, vb.pp("LayerNorm"))?;
        Ok(Self {
            word_embeddings,
            position_embeddings,
            layer_norm,
            span: tracing::span!(tracing::Level::TRACE, "embeddings"),
        })
    }

    fn forward(&self, input_ids: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let (_bsize, seq_len) = input_ids.dims2()?;
        let input_embeddings = self.word_embeddings.forward(input_ids)?;
        // Positions are always 0..seq_len, regenerated per call.
        let position_ids = (0..seq_len as u32).collect::<Vec<_>>();
        let position_ids = Tensor::new(&position_ids[..], input_ids.device())?;
        let embeddings =
            input_embeddings.broadcast_add(&self.position_embeddings.forward(&position_ids)?)?;
        let embeddings = self.layer_norm.forward(&embeddings)?;
        Ok(embeddings)
    }
}

struct MultiHeadSelfAttention {
    q_lin: Linear,
    k_lin: Linear,
    v_lin: Linear,
    out_lin: Linear,
    n_heads: usize,
    attention_head_size: usize,
    span: tracing::Span,
}

impl MultiHeadSelfAttention {
    fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let attention_head_size = config.dim / config.n_heads;
        let all_head_size = config.n_heads * attention_head_size;
        let dim = config.dim;
        let q_lin = linear(dim, all_head_size, vb.pp("q_lin"))?;
        let v_lin = linear(dim, all_head_size, vb.pp("v_lin"))?;
        let k_lin = linear(dim, all_head_size, vb.pp("k_lin"))?;
        let out_lin = linear(all_head_size, dim, vb.pp("out_lin"))?;
        Ok(Self {
            q_lin,
            k_lin,
            v_lin,
            out_lin,
            n_heads: config.n_heads,
            attention_head_size,
            span: tracing::span!(tracing::Level::TRACE, "attention"),
        })
    }
}

impl MultiHeadSelfAttention {
    /// Scaled dot-product attention; positions where `attention_mask` is set
    /// are filled with -inf before the softmax (i.e. masked out).
    fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let (bs, q_length, _dim) = hidden_states.dims3()?;

        let dim_per_head = self.attention_head_size;
        let q = self.q_lin.forward(hidden_states)?;
        let k = self.k_lin.forward(hidden_states)?;
        let v = self.v_lin.forward(hidden_states)?;

        // (bs, q_length, dim) -> (bs, n_heads, q_length, dim_per_head)
        let q = q
            .reshape((bs, q_length, self.n_heads, dim_per_head))?
            .transpose(1, 2)?;
        let k = k
            .reshape((bs, q_length, self.n_heads, dim_per_head))?
            .transpose(1, 2)?;
        let v = v
            .reshape((bs, q_length, self.n_heads, dim_per_head))?
            .transpose(1, 2)?;

        // Scale the queries rather than the scores.
        let q: Tensor = (q / (dim_per_head as f64).sqrt())?;
        let scores = q.matmul(&k.transpose(2, 3)?.contiguous()?)?;
        let mask = attention_mask.broadcast_as(scores.shape())?;

        let scores = masked_fill(&scores.to_dtype(DType::F32)?, &mask, f32::NEG_INFINITY)?;
        let weights = candle_nn::ops::softmax(&scores, candle::D::Minus1)?;

        let context = weights.matmul(&v.contiguous()?)?;
        let context = context
            .transpose(1, 2)?
            .reshape((bs, q_length, self.n_heads * dim_per_head))?
            .contiguous()?;
        let context = self.out_lin.forward(&context)?;

        Ok(context)
    }
}

/// Two-layer feed-forward block (dim -> hidden_dim -> dim).
#[allow(clippy::upper_case_acronyms)]
struct FFN {
    lin1: Linear,
    lin2: Linear,
    activation: HiddenActLayer,
    span: tracing::Span,
}

impl FFN {
    fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let lin1 = linear(config.dim, config.hidden_dim, vb.pp("lin1"))?;
        let lin2 = linear(config.hidden_dim, config.dim, vb.pp("lin2"))?;
        Ok(Self {
            lin1,
            lin2,
            activation: HiddenActLayer::new(config.activation),
            span: tracing::span!(tracing::Level::TRACE, "ffn"),
        })
    }
}

impl Module for FFN {
    fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        hidden_states
            .apply(&self.lin1)?
            .apply(&self.activation)?
            .apply(&self.lin2)
    }
}

/// One transformer layer: self-attention and FFN, each with a post-LayerNorm
/// residual connection.
struct TransformerBlock {
    attention: MultiHeadSelfAttention,
    sa_layer_norm: LayerNorm,
    ffn: FFN,
    output_layer_norm: LayerNorm,
    span: tracing::Span,
}

impl TransformerBlock {
    fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let attention = MultiHeadSelfAttention::load(vb.pp("attention"), config)?;
        let sa_layer_norm = layer_norm(config.dim, 1e-12, vb.pp("sa_layer_norm"))?;
        let ffn = FFN::load(vb.pp("ffn"), config)?;
        let output_layer_norm = layer_norm(config.dim, 1e-12, vb.pp("output_layer_norm"))?;
        Ok(Self {
            attention,
            sa_layer_norm,
            ffn,
            output_layer_norm,
            span: tracing::span!(tracing::Level::TRACE, "layer"),
        })
    }
}

impl TransformerBlock {
    fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let sa_output = self.attention.forward(hidden_states, attention_mask)?;
        // TODO: Support cross-attention?
        // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L523
        // TODO: Support something similar to `apply_chunking_to_forward`?
        let sa_output = sa_output.broadcast_add(hidden_states)?;
        let sa_output = self.sa_layer_norm.forward(&sa_output)?;

        let ffn_output = self.ffn.forward(&sa_output)?;
        let ffn_output = (&ffn_output + sa_output)?;
        let output = self.output_layer_norm.forward(&ffn_output)?;
        Ok(output)
    }
}

// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L556
struct Transformer {
    layers: Vec<TransformerBlock>,
    span: tracing::Span,
}

impl Transformer {
    fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let layers = (0..config.n_layers)
            .map(|index| TransformerBlock::load(vb.pp(format!("layer.{index}")), config))
            .collect::<Result<Vec<_>>>()?;
        let span = tracing::span!(tracing::Level::TRACE, "encoder");
        Ok(Transformer { layers, span })
    }
}

impl Transformer {
    fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let mut hidden_states = hidden_states.clone();
        // Use a loop rather than a fold as it's easier to modify when adding debug/...
        for layer in self.layers.iter() {
            hidden_states = layer.forward(&hidden_states, attention_mask)?;
        }
        Ok(hidden_states)
    }
}

/// The base DistilBERT encoder (no task head).
pub struct DistilBertModel {
    embeddings: Embeddings,
    transformer: Transformer,
    pub device: Device,
    span: tracing::Span,
}

impl DistilBertModel {
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        // Try the unprefixed weight layout first; if that fails, retry with a
        // `{model_type}.`-prefixed layout (e.g. "distilbert.embeddings...").
        let (embeddings, transformer) = match (
            Embeddings::load(vb.pp("embeddings"), config),
            Transformer::load(vb.pp("transformer"), config),
        ) {
            (Ok(embeddings), Ok(encoder)) => (embeddings, encoder),
            (Err(err), _) | (_, Err(err)) => {
                if let Some(model_type) = &config.model_type {
                    if let (Ok(embeddings), Ok(encoder)) = (
                        Embeddings::load(vb.pp(format!("{model_type}.embeddings")), config),
                        Transformer::load(vb.pp(format!("{model_type}.transformer")), config),
                    ) {
                        (embeddings, encoder)
                    } else {
                        return Err(err);
                    }
                } else {
                    return Err(err);
                }
            }
        };
        Ok(Self {
            embeddings,
            transformer,
            device: vb.device().clone(),
            span: tracing::span!(tracing::Level::TRACE, "model"),
        })
    }

    pub fn forward(&self, input_ids: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let embedding_output = self.embeddings.forward(input_ids)?;
        let sequence_output = self
            .transformer
            .forward(&embedding_output, attention_mask)?;
        Ok(sequence_output)
    }
}

/// Dense + activation + LayerNorm applied before the vocab projection.
struct DistilBertPredictionHeadTransform {
    dense: Linear,
    activation: HiddenActLayer,
    layer_norm: LayerNorm,
}

impl DistilBertPredictionHeadTransform {
    fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let dense = linear(config.dim, config.dim, vb.pp("vocab_transform"))?;
        let activation = HiddenActLayer::new(config.activation);
        let layer_norm = layer_norm(config.dim, 1e-12, vb.pp("vocab_layer_norm"))?;
        Ok(Self {
            dense,
            activation,
            layer_norm,
        })
    }
}

impl Module for DistilBertPredictionHeadTransform {
    fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
        let hidden_states = self
            .activation
            .forward(&self.dense.forward(hidden_states)?)?;
        self.layer_norm.forward(&hidden_states)
    }
}

// https://github.com/huggingface/transformers/blob/1bd604d11c405dfb8b78bda4062d88fc75c17de0/src/transformers/models/bert/modeling_bert.py#L769C1-L790C1
pub struct DistilBertLMPredictionHead {
    transform: DistilBertPredictionHeadTransform,
    decoder: Linear,
}

impl DistilBertLMPredictionHead {
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let transform = DistilBertPredictionHeadTransform::load(vb.clone(), config)?;

        // distil_bert_uncased uses the word embeddings for the vocab projector weight, but has a separate vocab_projector bias
        let vocab_projector_weight_vb = vb.pp("distilbert.embeddings.word_embeddings");
        let init_ws = candle_nn::init::DEFAULT_KAIMING_NORMAL;
        let ws = vocab_projector_weight_vb.get_with_hints(
            (config.vocab_size, config.dim),
            "weight",
            init_ws,
        )?;
        let bound = 1. / (config.dim as f64).sqrt();
        let init_bs = candle_nn::Init::Uniform {
            lo: -bound,
            up: bound,
        };

        let vocab_projector_bias_vb = vb.pp("vocab_projector");
        let bs = vocab_projector_bias_vb.get_with_hints(config.vocab_size, "bias", init_bs)?;

        let decoder = Linear::from_weights(ws, Some(bs));

        Ok(Self { transform, decoder })
    }
}

impl Module for DistilBertLMPredictionHead {
    fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
        self.decoder
            .forward(&self.transform.forward(hidden_states)?)
    }
}

// https://github.com/huggingface/transformers/blob/1bd604d11c405dfb8b78bda4062d88fc75c17de0/src/transformers/models/bert/modeling_bert.py#L792
pub struct DistilBertOnlyMLMHead {
    predictions: DistilBertLMPredictionHead,
}

impl DistilBertOnlyMLMHead {
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let predictions = DistilBertLMPredictionHead::load(vb.clone(), config)?;
        Ok(Self { predictions })
    }
}

impl Module for DistilBertOnlyMLMHead {
    fn forward(&self, sequence_output: &Tensor) -> Result<Tensor> {
        self.predictions.forward(sequence_output)
    }
}

/// DistilBERT with the masked-language-modeling head on top.
pub struct DistilBertForMaskedLM {
    pub bert: DistilBertModel,
    cls: DistilBertOnlyMLMHead,
}

impl DistilBertForMaskedLM {
    pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
        let bert = DistilBertModel::load(vb.pp("distilbert"), config)?;
        let cls = DistilBertOnlyMLMHead::load(vb.clone(), config)?;
        Ok(Self { bert, cls })
    }

    pub fn forward(&self, input_ids: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
        let sequence_output = self.bert.forward(input_ids, attention_mask)?;
        self.cls.forward(&sequence_output)
    }
}
{ "file_path": "candle/candle-transformers/src/models/distilbert.rs", "repo_id": "candle", "token_count": 7003 }
57
use crate::models::glm4::EosTokenId; use crate::{ models::with_tracing::{linear_b, linear_no_bias, Linear, RmsNorm}, utils::repeat_kv, }; use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use candle_nn::{kv_cache::KvCache, Activation, VarBuilder}; use std::sync::Arc; #[derive(Debug, Clone, serde::Deserialize)] pub struct Config { pub vocab_size: usize, pub hidden_size: usize, pub intermediate_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub head_dim: Option<usize>, pub partial_rotary_factor: Option<f32>, pub attention_bias: Option<bool>, pub num_key_value_heads: usize, pub max_position_embeddings: usize, pub sliding_window: Option<usize>, pub tie_word_embeddings: bool, pub rope_theta: f64, pub rms_norm_eps: f64, pub hidden_act: Activation, pub eos_token_id: Option<EosTokenId>, } #[derive(Debug, Clone)] pub(crate) struct RotaryEmbedding { sin: Tensor, cos: Tensor, rotary_dim: usize, } impl RotaryEmbedding { pub(crate) fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> { let dim = cfg .head_dim .unwrap_or(cfg.hidden_size / cfg.num_attention_heads); let rotary_dim = if cfg.partial_rotary_factor.is_some() { (cfg.partial_rotary_factor.unwrap() * dim as f32) as usize } else { dim }; let max_seq_len = cfg.max_position_embeddings; let inv_freq: Vec<_> = (0..rotary_dim) .step_by(2) .map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / rotary_dim as f64) as f32) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(dtype)? 
.reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, rotary_dim, }) } pub(crate) fn apply(&self, xs: &Tensor, offset: usize) -> Result<Tensor> { let (_, _, seq_len, _) = xs.dims4()?; let (s, e) = (offset, offset + seq_len); let cos = self.cos.i((s..e, ..))?.contiguous()?; let sin = self.sin.i((s..e, ..))?.contiguous()?; let xs_rot = xs .i((0, .., .., ..self.rotary_dim))? .unsqueeze(0)? .contiguous()?; let xs_pass = xs.i((0, .., .., self.rotary_dim..))?.unsqueeze(0)?; let xs_rot = candle_nn::rotary_emb::rope_i(&xs_rot, &cos, &sin).unwrap(); Tensor::cat(&[&xs_rot, &xs_pass], D::Minus1)?.contiguous() } } #[derive(Debug, Clone)] pub(crate) struct Mlp { gate_up_proj: Linear, down_proj: Linear, act_fn: Activation, } impl Mlp { pub(crate) fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { Ok(Self { gate_up_proj: linear_no_bias( cfg.hidden_size, cfg.intermediate_size * 2, vb.pp("gate_up_proj"), )?, down_proj: linear_no_bias(cfg.intermediate_size, cfg.hidden_size, vb.pp("down_proj"))?, act_fn: cfg.hidden_act, }) } } impl Module for Mlp { fn forward(&self, x: &Tensor) -> Result<Tensor> { let w = self.gate_up_proj.forward(x)?; let dim = w.dims().len() - 1; let gate = w.narrow(dim, 0, w.dim(dim)? / 2)?.contiguous()?; let gate = gate.apply(&self.act_fn)?; let up_states = w .narrow(dim, w.dim(dim)? / 2, w.dim(dim)? / 2)? .contiguous()?; self.down_proj.forward(&(gate * up_states)?) 
} } #[derive(Debug, Clone)] pub(crate) struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, hidden_size: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: KvCache, } impl Attention { pub(crate) fn new( cfg: &Config, rotary_emb: Arc<RotaryEmbedding>, vb: VarBuilder, ) -> Result<Self> { let head_dim = cfg .head_dim .unwrap_or(cfg.hidden_size / cfg.num_attention_heads); let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let num_kv_groups = num_heads / num_kv_heads; let q_proj = linear_b( cfg.hidden_size, num_heads * head_dim, cfg.attention_bias.unwrap_or(false), vb.pp("q_proj"), )?; let k_proj = linear_b( cfg.hidden_size, num_kv_heads * head_dim, cfg.attention_bias.unwrap_or(false), vb.pp("k_proj"), )?; let v_proj = linear_b( cfg.hidden_size, num_kv_heads * head_dim, cfg.attention_bias.unwrap_or(false), vb.pp("v_proj"), )?; let o_proj = linear_b( num_heads * head_dim, cfg.hidden_size, false, vb.pp("o_proj"), )?; // Necessary because the hidden_size in the config isn't always accurate let hidden_size = head_dim * cfg.num_attention_heads; // Initialize KV cache with 512 tokens capacity to reduce initial memory allocation. // The cache will grow in chunks of 512 tokens when needed. let kv_cache = KvCache::new(2, 512); Ok(Self { q_proj, k_proj, v_proj, o_proj, num_heads, num_kv_heads, num_kv_groups, head_dim, hidden_size, rotary_emb, kv_cache, }) } pub(crate) fn forward( &mut self, x: &Tensor, attn_mask: Option<&Tensor>, offset: usize, ) -> Result<Tensor> { let (b, l, _) = x.dims3()?; let q = self.q_proj.forward(x)?; let k = self.k_proj.forward(x)?; let v = self.v_proj.forward(x)?; let q = q .reshape((b, l, self.num_heads, self.head_dim))? .transpose(1, 2)?; let k = k .reshape((b, l, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let v = v .reshape((b, l, self.num_kv_heads, self.head_dim))? 
.transpose(1, 2)?; let q = self.rotary_emb.apply(&q, offset)?; let k = self.rotary_emb.apply(&k, offset)?; let (k, v) = self.kv_cache.append(&k.contiguous()?, &v.contiguous()?)?; let k = repeat_kv(k, self.num_kv_groups)?; let v = repeat_kv(v, self.num_kv_groups)?; let scale = 1.0 / (self.head_dim as f64).sqrt(); let mut scores = (q.matmul(&k.transpose(2, 3)?)? * scale)?; if let Some(m) = attn_mask { scores = scores.broadcast_add(m)?; } let probs = candle_nn::ops::softmax_last_dim(&scores)?; let ctx = probs.matmul(&v)?; ctx.transpose(1, 2)? .reshape((b, l, self.hidden_size))? .apply(&self.o_proj) } pub(crate) fn clear_kv_cache(&mut self) { self.kv_cache.reset(); } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: Mlp, input_layernorm: RmsNorm, post_attention_layernorm: RmsNorm, post_mlp_layernorm: RmsNorm, post_self_attn_layernorm: RmsNorm, } impl DecoderLayer { fn new(cfg: &Config, rotary: Arc<RotaryEmbedding>, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(cfg, rotary, vb.pp("self_attn"))?; let mlp = Mlp::new(cfg, vb.pp("mlp"))?; let input_layernorm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; let post_self_attn_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_self_attn_layernorm"), )?; let post_mlp_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_mlp_layernorm"), )?; Ok(Self { self_attn, mlp, input_layernorm, post_attention_layernorm, post_self_attn_layernorm, post_mlp_layernorm, }) } fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>, offset: usize) -> Result<Tensor> { let residual = xs; let hidden_states = self.input_layernorm.forward(xs)?; let hidden_states = self.self_attn.forward(&hidden_states, mask, offset)?; let hidden_states = self.post_self_attn_layernorm.forward(&hidden_states)?; let hidden_states = 
(residual + hidden_states)?; let residual = &hidden_states; let hidden_states = self.post_attention_layernorm.forward(&hidden_states)?; let hidden_states = self.mlp.forward(&hidden_states)?; let hidden_states = self.post_mlp_layernorm.forward(&hidden_states)?; residual + hidden_states } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache(); } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: candle_nn::Embedding, layers: Vec<DecoderLayer>, norm: RmsNorm, device: Device, dtype: DType, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let embed_tokens = candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("model.embed_tokens"))?; let rotary = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb.pp("model.layers"); for i in 0..cfg.num_hidden_layers { layers.push(DecoderLayer::new(cfg, rotary.clone(), vb_l.pp(i))?); } Ok(Self { embed_tokens, layers, norm: RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("model.norm"))?, device: vb.device().clone(), dtype: vb.dtype(), }) } fn clear_kv_cache(&mut self) { for l in &mut self.layers { l.clear_kv_cache(); } } fn causal_mask( &self, b: usize, tgt: usize, offset: usize, sw: Option<usize>, ) -> Result<Tensor> { let minf = f32::NEG_INFINITY; let mask: Vec<_> = (0..tgt) .flat_map(|i| { (0..(tgt + offset)).map(move |j| { let past_ok = j <= i + offset; let sw_ok = match sw { Some(w) => (i + offset) as i64 - j as i64 <= w as i64, None => true, }; if past_ok && sw_ok { 0. } else { minf } }) }) .collect(); Tensor::from_slice(&mask, (b, 1, tgt, tgt + offset), &self.device)?.to_dtype(self.dtype) } pub fn forward(&mut self, input: &Tensor, offset: usize) -> Result<Tensor> { let (b, l) = input.dims2()?; let mut h = self.embed_tokens.forward(input)?; let causal = if l == 1 { None } else { Some(self.causal_mask(b, l, offset, None)?) 
}; for layer in &mut self.layers { h = layer.forward(&h, causal.as_ref(), offset)?; } self.norm.forward(&h) } } #[derive(Debug, Clone)] pub struct ModelForCausalLM { base: Model, lm_head: Linear, } impl ModelForCausalLM { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let base = Model::new(cfg, vb.clone())?; let lm_head = if cfg.tie_word_embeddings { Linear::from_weights(base.embed_tokens.embeddings().clone(), None) } else { linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))? }; Ok(Self { base, lm_head }) } pub fn forward(&mut self, input: &Tensor, offset: usize) -> Result<Tensor> { let (_, l) = input.dims2()?; self.base .forward(input, offset)? .narrow(1, l - 1, 1)? .apply(&self.lm_head) } pub fn clear_kv_cache(&mut self) { self.base.clear_kv_cache(); } }
candle/candle-transformers/src/models/glm4_new.rs/0
{ "file_path": "candle/candle-transformers/src/models/glm4_new.rs", "repo_id": "candle", "token_count": 6724 }
58
//! mimi model //! //! [Mimi](https://huggingface.co/kyutai/mimi) is a state of the art audio //! compression model using an encoder/decoder architecture with residual vector //! quantization. The candle implementation supports streaming meaning that it's //! possible to encode or decode a stream of audio tokens on the flight to provide //! low latency interaction with an audio model. //! //! - 🤗 [HuggingFace Model Card](https://huggingface.co/kyutai/mimi) //! - 💻 [GitHub](https://github.com/kyutai-labs/moshi) //! //! //! # Example //! ```bash //! # Generating some audio tokens from an audio files. //! wget https://github.com/metavoiceio/metavoice-src/raw/main/assets/bria.mp3 //! cargo run --example mimi \ //! --features mimi --release -- \ //! audio-to-code bria.mp3 bria.safetensors //! //! # And decoding the audio tokens back into a sound file. //! cargo run --example mimi //! --features mimi --release -- \ //! code-to-audio bria.safetensors bria.wav //! // Copyright (c) Kyutai, all rights reserved. // This source code is licensed under the license found in the // LICENSE file in the root directory of this source tree. pub use candle; pub use candle_nn; pub mod conv; pub mod encodec; pub mod quantization; pub mod seanet; pub mod transformer; #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum NormType { RmsNorm, LayerNorm, } pub use encodec::{load, Config, Encodec as Model};
candle/candle-transformers/src/models/mimi/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/mimi/mod.rs", "repo_id": "candle", "token_count": 480 }
59
//! ModernBERT //! //! ModernBERT is a modernized bidirectional encoder-only Transformer model. //! - [Arxiv](https://arxiv.org/abs/2412.13663) "Smarter, Better, Faster, Longer: A Modern Bidirectional Encoder for Fast, Memory Efficient, and Long Context Finetuning and Inference" //! - Upstream [Github repo](https://github.com/AnswerDotAI/ModernBERT). //! - See modernbert in [candle-examples](https://github.com/huggingface/candle/tree/main/candle-examples/) for runnable code //! use candle::{DType, Device, IndexOp, Result, Tensor, D}; use candle_nn::{ embedding, layer_norm_no_bias, linear, linear_no_bias, ops::softmax, Embedding, LayerNorm, Linear, Module, VarBuilder, }; use serde::Deserialize; use core::f32; use std::collections::HashMap; use std::sync::Arc; #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { pub vocab_size: usize, pub hidden_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub intermediate_size: usize, pub max_position_embeddings: usize, pub layer_norm_eps: f64, pub pad_token_id: u32, pub global_attn_every_n_layers: usize, pub global_rope_theta: f64, pub local_attention: usize, pub local_rope_theta: f64, #[serde(default)] #[serde(flatten)] pub classifier_config: Option<ClassifierConfig>, } #[derive(Debug, Clone, Deserialize, PartialEq, Copy, Default)] #[serde(rename_all = "lowercase")] pub enum ClassifierPooling { #[default] CLS, MEAN, } #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct ClassifierConfig { pub id2label: HashMap<String, String>, pub label2id: HashMap<String, String>, pub classifier_pooling: ClassifierPooling, } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } impl RotaryEmbedding { fn new(dtype: DType, config: &Config, rope_theta: f64, dev: &Device) -> Result<Self> { let dim = config.hidden_size / config.num_attention_heads; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / rope_theta.powf(i as f64 / dim as f64) as f32) .collect(); let 
inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let max_seq_len = config.max_position_embeddings; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(dtype)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } fn apply_rotary_emb_qkv(&self, q: &Tensor, k: &Tensor) -> Result<(Tensor, Tensor)> { let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &self.cos, &self.sin)?; let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &self.cos, &self.sin)?; Ok((q_embed, k_embed)) } } #[derive(Clone)] struct ModernBertAttention { qkv: Linear, proj: Linear, num_attention_heads: usize, attention_head_size: usize, rotary_emb: Arc<RotaryEmbedding>, } impl ModernBertAttention { fn load(vb: VarBuilder, config: &Config, rotary_emb: Arc<RotaryEmbedding>) -> Result<Self> { let num_attention_heads = config.num_attention_heads; let attention_head_size = config.hidden_size / config.num_attention_heads; let qkv = linear_no_bias(config.hidden_size, config.hidden_size * 3, vb.pp("Wqkv"))?; let proj = linear_no_bias(config.hidden_size, config.hidden_size, vb.pp("Wo"))?; Ok(Self { qkv, proj, num_attention_heads, attention_head_size, rotary_emb, }) } fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> { let xs = hidden_states.clone(); let (b, seq_len, d) = xs.dims3()?; let qkv = xs .apply(&self.qkv)? .reshape(( b, seq_len, 3, self.num_attention_heads, self.attention_head_size, ))? 
.permute((2, 0, 3, 1, 4))?; let q = qkv.get(0)?; let k = qkv.get(1)?; let v = qkv.get(2)?; let (q, k) = self.rotary_emb.apply_rotary_emb_qkv(&q, &k)?; let scale = (self.attention_head_size as f64).powf(-0.5); let q = (q * scale)?; let att = q.matmul(&k.transpose(D::Minus2, D::Minus1)?)?; let att = att.broadcast_add(attention_mask)?; let att = softmax(&att, D::Minus1)?; let xs = att.matmul(&v)?; let xs = xs.transpose(1, 2)?.reshape((b, seq_len, d))?; let xs = xs.apply(&self.proj)?; let xs = xs.reshape((b, seq_len, d))?; Ok(xs) } } #[derive(Clone)] pub struct ModernBertMLP { wi: Linear, wo: Linear, } impl ModernBertMLP { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let wi = linear_no_bias( config.hidden_size, config.intermediate_size * 2, vb.pp("Wi"), )?; let wo = linear_no_bias(config.intermediate_size, config.hidden_size, vb.pp("Wo"))?; Ok(Self { wi, wo }) } } impl Module for ModernBertMLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = xs.apply(&self.wi)?; let xs = xs.chunk(2, D::Minus1)?; let xs = (&xs[0].gelu_erf()? 
* &xs[1])?.apply(&self.wo)?; // GeGLU Ok(xs) } } #[derive(Clone)] pub struct ModernBertLayer { attn: ModernBertAttention, mlp: ModernBertMLP, attn_norm: Option<LayerNorm>, mlp_norm: LayerNorm, uses_local_attention: bool, } impl ModernBertLayer { fn load( vb: VarBuilder, config: &Config, rotary_emb: Arc<RotaryEmbedding>, uses_local_attention: bool, ) -> Result<Self> { let attn = ModernBertAttention::load(vb.pp("attn"), config, rotary_emb)?; let mlp = ModernBertMLP::load(vb.pp("mlp"), config)?; let attn_norm = layer_norm_no_bias( config.hidden_size, config.layer_norm_eps, vb.pp("attn_norm"), ) .ok(); let mlp_norm = layer_norm_no_bias(config.hidden_size, config.layer_norm_eps, vb.pp("mlp_norm"))?; Ok(Self { attn, mlp, attn_norm, mlp_norm, uses_local_attention, }) } fn forward( &self, xs: &Tensor, global_attention_mask: &Tensor, local_attention_mask: &Tensor, ) -> Result<Tensor> { let residual = xs.clone(); let mut xs = xs.clone(); if let Some(norm) = &self.attn_norm { xs = xs.apply(norm)?; } let attention_mask = if self.uses_local_attention { &global_attention_mask.broadcast_add(local_attention_mask)? 
} else { global_attention_mask }; let xs = self.attn.forward(&xs, attention_mask)?; let xs = (xs + residual)?; let mlp_out = xs.apply(&self.mlp_norm)?.apply(&self.mlp)?; let xs = (xs + mlp_out)?; Ok(xs) } } #[derive(Clone)] pub struct ModernBertHead { dense: Linear, norm: LayerNorm, } impl ModernBertHead { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let dense = linear_no_bias(config.hidden_size, config.hidden_size, vb.pp("dense"))?; let norm = layer_norm_no_bias(config.hidden_size, config.layer_norm_eps, vb.pp("norm"))?; Ok(Self { dense, norm }) } } impl Module for ModernBertHead { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = xs.apply(&self.dense)?.gelu_erf()?.apply(&self.norm)?; Ok(xs) } } #[derive(Clone)] pub struct ModernBertDecoder { decoder: Linear, } impl ModernBertDecoder { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { // The decoder weights are tied with the embeddings layer weights let decoder_weights = vb.get( (config.vocab_size, config.hidden_size), "model.embeddings.tok_embeddings.weight", )?; let decoder_bias = vb.get(config.vocab_size, "decoder.bias")?; let decoder = Linear::new(decoder_weights, Some(decoder_bias)); Ok(Self { decoder }) } } impl Module for ModernBertDecoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = xs.apply(&self.decoder)?; Ok(xs) } } // Global attention mask calculated from padded token inputs fn prepare_4d_attention_mask( mask: &Tensor, dtype: DType, tgt_len: Option<usize>, ) -> Result<Tensor> { let bsz = mask.dim(0)?; let src_len = mask.dim(1)?; let tgt_len = tgt_len.unwrap_or(src_len); let expanded_mask = mask .unsqueeze(1)? .unsqueeze(2)? .expand((bsz, 1, tgt_len, src_len))? 
.to_dtype(dtype)?; let inverted_mask = (1.0 - expanded_mask)?; (inverted_mask * f32::MIN as f64)?.to_dtype(dtype) } // Attention mask caused by the sliding window fn get_local_attention_mask( seq_len: usize, max_distance: usize, device: &Device, ) -> Result<Tensor> { let mask: Vec<_> = (0..seq_len) .flat_map(|i| { (0..seq_len).map(move |j| { if (j as i32 - i as i32).abs() > max_distance as i32 { f32::NEG_INFINITY } else { 0. } }) }) .collect(); Tensor::from_slice(&mask, (seq_len, seq_len), device) } // ModernBERT backbone #[derive(Clone)] pub struct ModernBert { word_embeddings: Embedding, norm: LayerNorm, layers: Vec<ModernBertLayer>, final_norm: LayerNorm, local_attention_size: usize, } impl ModernBert { pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let word_embeddings = embedding( config.vocab_size, config.hidden_size, vb.pp("model.embeddings.tok_embeddings"), )?; let norm = layer_norm_no_bias( config.hidden_size, config.layer_norm_eps, vb.pp("model.embeddings.norm"), )?; let global_rotary_emb = Arc::new(RotaryEmbedding::new( vb.dtype(), config, config.global_rope_theta, vb.device(), )?); let local_rotary_emb = Arc::new(RotaryEmbedding::new( vb.dtype(), config, config.local_rope_theta, vb.device(), )?); let mut layers = Vec::with_capacity(config.num_hidden_layers); for layer_id in 0..config.num_hidden_layers { let layer_uses_local_attention = layer_id % config.global_attn_every_n_layers != 0; layers.push(ModernBertLayer::load( vb.pp(format!("model.layers.{layer_id}")), config, if layer_uses_local_attention { local_rotary_emb.clone() } else { global_rotary_emb.clone() }, layer_uses_local_attention, )?); } let final_norm = layer_norm_no_bias( config.hidden_size, config.layer_norm_eps, vb.pp("model.final_norm"), )?; Ok(Self { word_embeddings, norm, layers, final_norm, local_attention_size: config.local_attention, }) } pub fn forward(&self, xs: &Tensor, mask: &Tensor) -> Result<Tensor> { let seq_len = xs.shape().dims()[1]; let global_attention_mask 
= prepare_4d_attention_mask(mask, DType::F32, None)?.to_device(xs.device())?; let local_attention_mask = get_local_attention_mask(seq_len, self.local_attention_size / 2, xs.device())?; let mut xs = xs.apply(&self.word_embeddings)?.apply(&self.norm)?; for layer in self.layers.iter() { xs = layer.forward(&xs, &global_attention_mask, &local_attention_mask)?; } let xs = xs.apply(&self.final_norm)?; Ok(xs) } } // ModernBERT for the fill-mask task #[derive(Clone)] pub struct ModernBertForMaskedLM { model: ModernBert, decoder: ModernBertDecoder, head: ModernBertHead, } impl ModernBertForMaskedLM { pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let model = ModernBert::load(vb.clone(), config)?; let decoder = ModernBertDecoder::load(vb.clone(), config)?; let head = ModernBertHead::load(vb.pp("head"), config)?; Ok(Self { model, decoder, head, }) } pub fn forward(&self, xs: &Tensor, mask: &Tensor) -> Result<Tensor> { let xs = self .model .forward(xs, mask)? .apply(&self.head)? .apply(&self.decoder)?; Ok(xs) } } #[derive(Clone)] pub struct ModernBertClassifier { classifier: Linear, } impl ModernBertClassifier { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { // The decoder weights are tied with the embeddings layer weights let classifier = linear( config.hidden_size, config .classifier_config .as_ref() .map(|cc| cc.id2label.len()) .unwrap_or_default(), vb.pp("classifier"), )?; Ok(Self { classifier }) } } impl Module for ModernBertClassifier { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = xs.apply(&self.classifier)?; softmax(&xs, D::Minus1) } } #[derive(Clone)] pub struct ModernBertForSequenceClassification { model: ModernBert, head: ModernBertHead, classifier: ModernBertClassifier, classifier_pooling: ClassifierPooling, } impl ModernBertForSequenceClassification { pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let model = ModernBert::load(vb.clone(), config)?; let classifier = ModernBertClassifier::load(vb.clone(), 
config)?; let head = ModernBertHead::load(vb.pp("head"), config)?; Ok(Self { model, head, classifier, classifier_pooling: config .classifier_config .as_ref() .map(|cc| cc.classifier_pooling) .unwrap_or_default(), }) } pub fn forward(&self, xs: &Tensor, mask: &Tensor) -> Result<Tensor> { let output = self.model.forward(xs, mask)?; let last_hidden_state = match self.classifier_pooling { ClassifierPooling::CLS => output.i((.., .., 0))?, ClassifierPooling::MEAN => { let unsqueezed_mask = &mask.unsqueeze(D::Minus1)?.to_dtype(DType::F32)?; let sum_output = output.broadcast_mul(unsqueezed_mask)?.sum(1)?; sum_output.broadcast_div(&mask.sum_keepdim(1)?.to_dtype(DType::F32)?)? } }; let xs = self .head .forward(&last_hidden_state)? .apply(&self.classifier)?; Ok(xs) } }
candle/candle-transformers/src/models/modernbert.rs/0
{ "file_path": "candle/candle-transformers/src/models/modernbert.rs", "repo_id": "candle", "token_count": 7630 }
60
//! Recurrent Gemma model implementation with quantization support. //! //! Gemma is a large language model optimized for efficiency. //! This implementation provides quantization for reduced memory and compute. //! //! Key characteristics: //! - Recurrent blocks with gated recurrent units //! - Convolution and attention blocks //! - RMSNorm for layer normalization //! - Rotary positional embeddings (RoPE) //! - Support for 8-bit quantization //! //! References: //! - [Gemma Paper](https://arxiv.org/abs/2401.06751) //! - [Model Card](https://ai.google.dev/gemma) //! use crate::quantized_nn::{linear_b as linear, Embedding, Linear}; pub use crate::quantized_var_builder::VarBuilder; use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use std::sync::Arc; use crate::models::recurrent_gemma::{Config, Rglru, RmsNorm, RotaryEmbedding, TemporalBlockType}; fn rms_norm(size: usize, eps: f64, vb: VarBuilder) -> Result<RmsNorm> { let weight = vb.get(size, "weight")?.dequantize(vb.device())?; Ok(RmsNorm::from_weight(weight, eps)) } #[derive(Debug, Clone)] struct Mlp { gate_proj: Linear, up_proj: Linear, down_proj: Linear, act_fn: candle_nn::Activation, } impl Mlp { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let h = cfg.hidden_size; let intermediate_size = cfg.intermediate_size / 2; let gate_proj = linear(h, intermediate_size, true, vb.pp("gate_proj"))?; let up_proj = linear(h, intermediate_size, true, vb.pp("up_proj"))?; let down_proj = linear(intermediate_size, h, true, vb.pp("down_proj"))?; Ok(Self { gate_proj, up_proj, down_proj, act_fn: cfg.hidden_activation, }) } } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let gate = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?; (gate * xs.apply(&self.up_proj))?.apply(&self.down_proj) } } fn rglru(cfg: &Config, vb: VarBuilder) -> Result<Rglru> { let h = cfg.hidden_size; let lru_width = cfg.lru_width.unwrap_or(h); let n_heads = cfg.num_attention_heads; let block_width = lru_width / 
n_heads; let recurrent_param = vb.get((lru_width,), "recurrent_param")?; let input_gate_weight = vb.get((n_heads, block_width, block_width), "input_gate_weight")?; let input_gate_bias = vb.get((n_heads, block_width), "input_gate_bias")?; let recurrent_gate_weight = vb.get((n_heads, block_width, block_width), "recurrent_gate_weight")?; let recurrent_gate_bias = vb.get((n_heads, block_width), "recurrent_gate_bias")?; Ok(Rglru { recurrent_param: recurrent_param.dequantize(vb.device())?, input_gate_bias: input_gate_bias.dequantize(vb.device())?, input_gate_weight: input_gate_weight.dequantize(vb.device())?, recurrent_gate_bias: recurrent_gate_bias.dequantize(vb.device())?, recurrent_gate_weight: recurrent_gate_weight.dequantize(vb.device())?, block_width, n_heads, recurrent_states: None, }) } #[derive(Debug, Clone)] struct RecurrentBlock { linear_y: Linear, linear_x: Linear, linear_out: Linear, conv_1d: candle_nn::Conv1d, conv1d_state: Option<Tensor>, conv1d_width: usize, rg_lru: Rglru, act_fn: candle_nn::Activation, } impl RecurrentBlock { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let h = cfg.hidden_size; let lru_width = cfg.lru_width.unwrap_or(h); let linear_y = linear(h, lru_width, true, vb.pp("linear_y"))?; let linear_x = linear(h, lru_width, true, vb.pp("linear_x"))?; let linear_out = linear(lru_width, h, true, vb.pp("linear_out"))?; let conv_1d = { let ws = vb .get((lru_width, 1, cfg.conv1d_width), "conv_1d.weight")? 
.dequantize(vb.device())?; let bs = vb.get(lru_width, "conv_1d.bias")?.dequantize(vb.device())?; let config = candle_nn::Conv1dConfig { groups: lru_width, padding: cfg.conv1d_width - 1, ..Default::default() }; candle_nn::Conv1d::new(ws, Some(bs), config) }; let rg_lru = rglru(cfg, vb.pp("rg_lru"))?; Ok(Self { linear_y, linear_x, linear_out, conv_1d, conv1d_state: None, conv1d_width: cfg.conv1d_width, rg_lru, act_fn: cfg.hidden_activation, }) } pub fn forward(&mut self, xs: &Tensor, pos: usize) -> Result<Tensor> { let (_b_sz, seq_len, _) = xs.dims3()?; let y_branch = xs.apply(&self.linear_y)?.apply(&self.act_fn)?; let x_branch = xs.apply(&self.linear_x)?.transpose(1, 2)?; let x_branch = if pos == 0 { let x_len = x_branch.dim(D::Minus1)?; let pad = self.conv1d_width as i64 - x_len as i64 - 1; let padded = match pad.cmp(&0) { std::cmp::Ordering::Equal => x_branch.clone(), std::cmp::Ordering::Less => { let rev_pad = (-pad) as usize; x_branch.narrow(D::Minus1, rev_pad, x_len - rev_pad)? } std::cmp::Ordering::Greater => { x_branch.pad_with_zeros(D::Minus1, pad as usize, 0)? } }; self.conv1d_state = Some(padded); x_branch .apply(&self.conv_1d)? .narrow(D::Minus1, 0, seq_len)? 
} else { let conv_state = match self.conv1d_state.as_ref() { None => candle::bail!("empty cache despite pos > 0"), Some(s) => Tensor::cat(&[s, &x_branch], D::Minus1)?, }; let w = self.conv_1d.weight().i((.., 0, ..))?; let x_branch = conv_state.broadcast_mul(&w)?.sum(D::Minus1)?; let x_branch = match self.conv_1d.bias() { None => x_branch, Some(b) => x_branch.broadcast_add(b)?, }; let x_branch = x_branch.unsqueeze(D::Minus1)?; self.conv1d_state = Some(conv_state.i((.., .., 1..))?); x_branch }; let x_branch = x_branch.transpose(1, 2)?; let x_branch = self.rg_lru.forward(&x_branch, pos)?; (x_branch * y_branch)?.apply(&self.linear_out) } } #[derive(Debug, Clone)] struct SdpaAttention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, n_heads: usize, n_kv_heads: usize, head_dim: usize, hidden_size: usize, kv_cache: Option<(Tensor, Tensor)>, rotary_emb: Arc<RotaryEmbedding>, } impl SdpaAttention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let h = cfg.hidden_size; let n_heads = cfg.num_attention_heads; let n_kv_heads = cfg.num_key_value_heads; let hd = cfg.head_dim; let q_proj = linear(h, n_heads * hd, cfg.attention_bias, vb.pp("q_proj"))?; let k_proj = linear(h, n_kv_heads * hd, cfg.attention_bias, vb.pp("k_proj"))?; let v_proj = linear(h, n_kv_heads * hd, cfg.attention_bias, vb.pp("v_proj"))?; let o_proj = linear(n_heads * hd, h, true, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, n_heads, n_kv_heads, head_dim: hd, hidden_size: h, kv_cache: None, rotary_emb, }) } fn repeat_kv(&self, x: Tensor) -> Result<Tensor> { let n_rep = self.n_heads / self.n_kv_heads; crate::utils::repeat_kv(x, n_rep) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, pos: usize, ) -> Result<Tensor> { let (bsz, q_len, _) = xs.dims3()?; let query_states = xs.apply(&self.q_proj)?; let key_states = xs.apply(&self.k_proj)?; let value_states = xs.apply(&self.v_proj)?; let query_states = query_states 
.reshape((bsz, q_len, self.n_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((bsz, q_len, self.n_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((bsz, q_len, self.n_kv_heads, self.head_dim))? .transpose(1, 2)?; let query_states = query_states.chunk(2, D::Minus1)?; let key_states = key_states.chunk(2, D::Minus1)?; let (query_rot, key_rot) = self.rotary_emb .apply_rotary_emb_qkv(&query_states[0], &key_states[0], pos)?; let query_states = Tensor::cat(&[&query_rot, &query_states[1]], D::Minus1)?.contiguous()?; let key_states = Tensor::cat(&[&key_rot, &key_states[1]], D::Minus1)?.contiguous()?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = self.repeat_kv(key_states)?; let value_states = self.repeat_kv(value_states)?; let xs = { let att = (query_states.matmul(&key_states.t()?)? / (self.head_dim as f64).sqrt())?; let att = if q_len == 1 { att } else { match attention_mask { None => att, Some(mask) => att.broadcast_add(mask)?, } }; let att = candle_nn::ops::softmax_last_dim(&att)?; att.matmul(&value_states.contiguous()?)? }; let xs = xs .transpose(1, 2)? 
.reshape((bsz, q_len, self.hidden_size))?;
        self.o_proj.forward(&xs)
    }
}

/// A temporal-mixing block: either a linear recurrent block or an attention
/// block, selected per layer from the model configuration.
#[derive(Debug, Clone)]
enum TemporalBlock {
    Recurrent(RecurrentBlock),
    Attention(SdpaAttention),
}

impl TemporalBlock {
    /// Dispatches to the wrapped block. `attention_mask` is only forwarded to
    /// the attention variant; the recurrent variant does not take a mask.
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: Option<&Tensor>,
        pos: usize,
    ) -> Result<Tensor> {
        match self {
            Self::Recurrent(b) => b.forward(xs, pos),
            Self::Attention(b) => b.forward(xs, attention_mask, pos),
        }
    }
}

/// One decoder layer: pre-norm -> temporal block -> residual add, then
/// pre-norm -> MLP -> residual add.
#[derive(Debug, Clone)]
struct DecoderLayer {
    temporal_pre_norm: RmsNorm,
    channel_pre_norm: RmsNorm,
    temporal_block: TemporalBlock,
    mlp_block: Mlp,
}

impl DecoderLayer {
    /// Builds layer `block_idx`. The temporal block kind is taken from
    /// `cfg.block_types`, cycling through it when there are more layers than
    /// entries (`block_idx % cfg.block_types.len()`).
    fn new(
        block_idx: usize,
        rotary_emb: Arc<RotaryEmbedding>,
        cfg: &Config,
        vb: VarBuilder,
    ) -> Result<Self> {
        let h = cfg.hidden_size;
        let temporal_pre_norm = rms_norm(h, cfg.rms_norm_eps, vb.pp("temporal_pre_norm"))?;
        let channel_pre_norm = rms_norm(h, cfg.rms_norm_eps, vb.pp("channel_pre_norm"))?;
        let temporal_block = match cfg.block_types[block_idx % cfg.block_types.len()] {
            TemporalBlockType::Recurrent => {
                let block = RecurrentBlock::new(cfg, vb.pp("temporal_block"))?;
                TemporalBlock::Recurrent(block)
            }
            TemporalBlockType::Attention => {
                let block = SdpaAttention::new(rotary_emb, cfg, vb.pp("temporal_block"))?;
                TemporalBlock::Attention(block)
            }
        };
        let mlp_block = Mlp::new(cfg, vb.pp("mlp_block"))?;
        Ok(Self {
            temporal_pre_norm,
            channel_pre_norm,
            temporal_block,
            mlp_block,
        })
    }

    /// Runs the layer: two pre-norm residual sub-blocks (temporal then MLP).
    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: Option<&Tensor>,
        pos: usize,
    ) -> Result<Tensor> {
        let residual = xs;
        let xs = xs.apply(&self.temporal_pre_norm)?;
        let xs = self.temporal_block.forward(&xs, attention_mask, pos)?;
        // First residual connection, around the temporal block.
        let xs = (xs + residual)?;
        let residual = &xs;
        let xs = xs.apply(&self.channel_pre_norm)?.apply(&self.mlp_block)?;
        // Second residual connection, around the MLP block.
        xs + residual
    }
}

/// Full decoder-only model: token embedding, a stack of `DecoderLayer`s, a
/// final RmsNorm, and a (weight-tied) language-model head.
#[derive(Debug, Clone)]
pub struct Model {
    embed_tokens: Embedding,
    layers: Vec<DecoderLayer>,
    final_norm: RmsNorm,
    lm_head: Linear,
    hidden_size: usize,
    // Logits are passed through `tanh(logits / cap) * cap` before returning.
    logits_soft_cap: f64,
    device: Device,
}

impl Model {
    /// Loads all weights from `vb` according to `cfg`.
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let embed_tokens = Embedding::new(cfg.vocab_size, cfg.hidden_size, vb.pp("embed_tokens"))?;
        let rotary_emb = Arc::new(RotaryEmbedding::new(DType::F32, cfg, vb.device())?);
        let vb_b = vb.pp("layers");
        let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
        for idx in 0..cfg.num_hidden_layers {
            let layer = DecoderLayer::new(idx, rotary_emb.clone(), cfg, vb_b.pp(idx))?;
            layers.push(layer)
        }
        let final_norm = rms_norm(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("final_norm"))?;
        // NOTE: the lm_head is deliberately loaded from the "embed_tokens"
        // weights, i.e. input and output embeddings are tied.
        let lm_head = linear(
            cfg.hidden_size,
            cfg.vocab_size,
            false,
            vb.pp("embed_tokens"),
        )?;
        Ok(Self {
            embed_tokens,
            layers,
            final_norm,
            lm_head,
            hidden_size: cfg.hidden_size,
            logits_soft_cap: cfg.logits_soft_cap,
            device: vb.device().clone(),
        })
    }

    /// Builds a causal attention mask of shape
    /// `(b_size, 1, tgt_len, tgt_len + seqlen_offset)` in f32: `-inf` strictly
    /// above the diagonal (future positions), `0` elsewhere, with an all-zero
    /// prefix of width `seqlen_offset` so cached positions are fully visible.
    fn prepare_decoder_attention_mask(
        &self,
        b_size: usize,
        tgt_len: usize,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        let mask: Vec<_> = (0..tgt_len)
            .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
            .collect();
        let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
        let mask = if seqlen_offset > 0 {
            let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
            Tensor::cat(&[&mask0, &mask], D::Minus1)?
        } else {
            mask
        };
        mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
            .to_dtype(DType::F32)
    }

    /// Runs a forward pass over token ids `xs` (shape `(batch, seq)`), with
    /// `pos` the offset of the first token in the KV cache. Returns logits for
    /// the **last** position only, soft-capped via `tanh`.
    pub fn forward(&mut self, xs: &Tensor, pos: usize) -> Result<Tensor> {
        let (b_size, seq_len) = xs.dims2()?;
        // A single query token attends to everything cached, so no mask needed.
        let attention_mask = if seq_len <= 1 {
            None
        } else {
            let mask = self.prepare_decoder_attention_mask(b_size, seq_len, pos)?;
            Some(mask)
        };
        let xs = xs.apply(&self.embed_tokens)?;
        // Scale embeddings by sqrt(hidden_size), as in the Gemma family.
        let mut xs = (xs * (self.hidden_size as f64).sqrt())?;
        for layer in self.layers.iter_mut() {
            xs = layer.forward(&xs, attention_mask.as_ref(), pos)?;
        }
        // Only the final position's hidden state is projected to logits.
        let logits = xs
            .narrow(1, seq_len - 1, 1)?
            .apply(&self.final_norm)?
            .apply(&self.lm_head)?;
        // Soft cap: tanh(logits / cap) * cap keeps logits in (-cap, cap).
        let logits = ((logits / self.logits_soft_cap)?.tanh()? * self.logits_soft_cap)?;
        Ok(logits)
    }
}
candle/candle-transformers/src/models/quantized_recurrent_gemma.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_recurrent_gemma.rs", "repo_id": "candle", "token_count": 7858 }
61
use candle::{IndexOp, Result, Tensor};
use candle_nn::{Module, VarBuilder};

use super::transformer::TwoWayTransformer;

/// Small MLP used both as the IoU prediction head and as the per-token
/// hypernetworks of the mask decoder. ReLU between layers, optional sigmoid
/// on the output.
#[derive(Debug)]
struct MlpMaskDecoder {
    layers: Vec<super::Linear>,
    sigmoid_output: bool,
    span: tracing::Span,
}

impl MlpMaskDecoder {
    /// Builds `num_layers` linear layers: input_dim -> hidden_dim -> ... ->
    /// output_dim, all with bias.
    fn new(
        input_dim: usize,
        hidden_dim: usize,
        output_dim: usize,
        num_layers: usize,
        sigmoid_output: bool,
        vb: VarBuilder,
    ) -> Result<Self> {
        let mut layers = Vec::with_capacity(num_layers);
        let vb = vb.pp("layers");
        for i in 0..num_layers {
            // First layer consumes input_dim, last one produces output_dim,
            // everything in between is hidden_dim -> hidden_dim.
            let in_dim = if i == 0 { input_dim } else { hidden_dim };
            let out_dim = if i + 1 == num_layers {
                output_dim
            } else {
                hidden_dim
            };
            let layer = super::linear(vb.pp(i), in_dim, out_dim, true)?;
            layers.push(layer)
        }
        let span = tracing::span!(tracing::Level::TRACE, "mlp-mask-decoder");
        Ok(Self {
            layers,
            sigmoid_output,
            span,
        })
    }
}

impl Module for MlpMaskDecoder {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let mut xs = xs.clone();
        for (i, layer) in self.layers.iter().enumerate() {
            xs = layer.forward(&xs)?;
            // ReLU after every layer except the last.
            if i + 1 < self.layers.len() {
                xs = xs.relu()?
            }
        }
        if self.sigmoid_output {
            candle_nn::ops::sigmoid(&xs)
        } else {
            Ok(xs)
        }
    }
}

/// Segment-Anything mask decoder: combines image embeddings and prompt
/// embeddings through a two-way transformer, then predicts masks and their
/// estimated IoU quality scores.
#[derive(Debug)]
pub struct MaskDecoder {
    iou_token: candle_nn::Embedding,
    mask_tokens: candle_nn::Embedding,
    iou_prediction_head: MlpMaskDecoder,
    output_upscaling_conv1: candle_nn::ConvTranspose2d,
    output_upscaling_ln: super::LayerNorm2d,
    output_upscaling_conv2: candle_nn::ConvTranspose2d,
    num_mask_tokens: usize,
    output_hypernetworks_mlps: Vec<MlpMaskDecoder>,
    transformer: TwoWayTransformer,
    span: tracing::Span,
}

impl MaskDecoder {
    /// Builds the decoder. `num_multimask_outputs` masks are predicted in
    /// multimask mode, plus one extra "single mask" token (hence `+ 1`).
    pub fn new(
        transformer_dim: usize,
        num_multimask_outputs: usize,
        iou_head_depth: usize,
        iou_head_hidden_dim: usize,
        vb: VarBuilder,
    ) -> Result<Self> {
        let num_mask_tokens = num_multimask_outputs + 1;
        let iou_prediction_head = MlpMaskDecoder::new(
            transformer_dim,
            iou_head_hidden_dim,
            num_mask_tokens,
            iou_head_depth,
            /* sigmoid_output */ false,
            vb.pp("iou_prediction_head"),
        )?;
        let iou_token = candle_nn::embedding(1, transformer_dim, vb.pp("iou_token"))?;
        let mask_tokens =
            candle_nn::embedding(num_mask_tokens, transformer_dim, vb.pp("mask_tokens"))?;
        let cfg = candle_nn::ConvTranspose2dConfig {
            stride: 2,
            ..Default::default()
        };
        // NOTE(review): weight paths "output_upscaling.0/.1/.3" follow the
        // original PyTorch nn.Sequential indices; index 2 is presumably a
        // parameter-free activation and therefore has no weights to load.
        let output_upscaling_conv1 = candle_nn::conv_transpose2d(
            transformer_dim,
            transformer_dim / 4,
            2,
            cfg,
            vb.pp("output_upscaling.0"),
        )?;
        let output_upscaling_ln =
            super::LayerNorm2d::new(transformer_dim / 4, 1e-6, vb.pp("output_upscaling.1"))?;
        let output_upscaling_conv2 = candle_nn::conv_transpose2d(
            transformer_dim / 4,
            transformer_dim / 8,
            2,
            cfg,
            vb.pp("output_upscaling.3"),
        )?;
        // One hypernetwork MLP per mask token.
        let mut output_hypernetworks_mlps = Vec::with_capacity(num_mask_tokens);
        let vb_o = vb.pp("output_hypernetworks_mlps");
        for i in 0..num_mask_tokens {
            let mlp = MlpMaskDecoder::new(
                transformer_dim,
                transformer_dim,
                transformer_dim / 8,
                3,
                false,
                vb_o.pp(i),
            )?;
            output_hypernetworks_mlps.push(mlp)
        }
        let transformer = TwoWayTransformer::new(
            /* depth */ 2,
            /* embedding_dim */ transformer_dim,
            /* num_heads */ 8,
            /* mlp_dim */ 2048,
            vb.pp("transformer"),
        )?;
        let span = tracing::span!(tracing::Level::TRACE, "mask-decoder");
        Ok(Self {
            iou_token,
            mask_tokens,
            iou_prediction_head,
            output_upscaling_conv1,
            output_upscaling_ln,
            output_upscaling_conv2,
            num_mask_tokens,
            output_hypernetworks_mlps,
            transformer,
            span,
        })
    }

    /// Predicts masks and IoU scores. With `multimask_output` set, returns all
    /// multimask candidates (tokens 1..); otherwise only the single-mask
    /// output (token 0).
    pub fn forward(
        &self,
        image_embeddings: &Tensor,
        image_pe: &Tensor,
        sparse_prompt_embeddings: &Tensor,
        dense_prompt_embeddings: &Tensor,
        multimask_output: bool,
    ) -> Result<(Tensor, Tensor)> {
        let _enter = self.span.enter();
        let (masks, iou_pred) = self.predict_masks(
            image_embeddings,
            image_pe,
            sparse_prompt_embeddings,
            dense_prompt_embeddings,
        )?;
        let masks = if multimask_output {
            masks.i((.., 1..))?
        } else {
            masks.i((.., 0..1))?
        };
        let iou_pred = if multimask_output {
            iou_pred.i((.., 1..))?
        } else {
            iou_pred.i((.., 0..1))?
        };
        Ok((masks, iou_pred))
    }

    /// Core mask prediction: runs the two-way transformer over
    /// [iou_token, mask_tokens, sparse prompts] vs. the image embedding, then
    /// upscales and combines with per-token hypernetwork outputs.
    fn predict_masks(
        &self,
        image_embeddings: &Tensor,
        image_pe: &Tensor,
        sparse_prompt_embeddings: &Tensor,
        dense_prompt_embeddings: &Tensor,
    ) -> Result<(Tensor, Tensor)> {
        // Concatenate output tokens.
        let output_tokens = Tensor::cat(
            &[self.iou_token.embeddings(), self.mask_tokens.embeddings()],
            0,
        )?;
        let (d1, d2) = output_tokens.dims2()?;
        // Broadcast the learned tokens over the prompt batch dimension.
        let output_tokens = output_tokens
            .unsqueeze(0)?
            .expand((sparse_prompt_embeddings.dim(0)?, d1, d2))?;
        let tokens = Tensor::cat(&[&output_tokens, sparse_prompt_embeddings], 1)?;

        // Expand per-image data in batch direction to be per mask
        let src = repeat_interleave(image_embeddings, tokens.dim(0)?, 0)?;
        let src = src.broadcast_add(dense_prompt_embeddings)?;
        let pos_src = repeat_interleave(image_pe, tokens.dim(0)?, 0)?;
        let (b, c, h, w) = src.dims4()?;

        // Run the transformer
        let (hs, src) = self.transformer.forward(&src, &pos_src, &tokens)?;
        // Token 0 is the IoU token, tokens 1..1+num_mask_tokens are the masks.
        let iou_token_out = hs.i((.., 0))?;
        let mask_tokens_out = hs.i((.., 1..1 + self.num_mask_tokens))?;

        // Upscale mask embeddings and predict masks using the masks tokens.
        let src = src.transpose(1, 2)?.reshape((b, c, h, w))?;
        let upscaled_embedding = self
            .output_upscaling_conv1
            .forward(&src)?
            .apply(&self.output_upscaling_ln)?
            .gelu()?
            .apply(&self.output_upscaling_conv2)?
            .gelu()?;
        let mut hyper_in_list = Vec::with_capacity(self.num_mask_tokens);
        for (i, mlp) in self.output_hypernetworks_mlps.iter().enumerate() {
            let h = mlp.forward(&mask_tokens_out.i((.., i))?)?;
            hyper_in_list.push(h)
        }
        let hyper_in = Tensor::stack(hyper_in_list.as_slice(), 1)?.contiguous()?;
        let (b, c, h, w) = upscaled_embedding.dims4()?;
        // Each mask is a dot product between its hypernetwork output and the
        // upscaled embedding, reshaped back to spatial form.
        let masks = hyper_in.matmul(&upscaled_embedding.reshape((b, c, h * w))?)?;
        let masks = masks.reshape((b, (), h, w))?;

        // Generate mask quality predictions.
        let iou_pred = self.iou_prediction_head.forward(&iou_token_out)?;
        Ok((masks, iou_pred))
    }
}

// Equivalent to torch.repeat_interleave
// Inserts a broadcast dimension of size `repeats` after `dim` and flattens it
// back in, so each slice along `dim` is repeated `repeats` times contiguously.
fn repeat_interleave(img: &Tensor, repeats: usize, dim: usize) -> Result<Tensor> {
    let img = img.unsqueeze(dim + 1)?;
    let mut dims = img.dims().to_vec();
    dims[dim + 1] = repeats;
    img.broadcast_as(dims)?.flatten(dim, dim + 1)
}
candle/candle-transformers/src/models/segment_anything/mask_decoder.rs/0
{ "file_path": "candle/candle-transformers/src/models/segment_anything/mask_decoder.rs", "repo_id": "candle", "token_count": 4213 }
62
#![allow(dead_code)] //! # Diffusion pipelines and models //! //! Noise schedulers can be used to set the trade-off between //! inference speed and quality. use candle::{Result, Tensor}; pub trait SchedulerConfig: std::fmt::Debug + Send + Sync { fn build(&self, inference_steps: usize) -> Result<Box<dyn Scheduler>>; } /// This trait represents a scheduler for the diffusion process. pub trait Scheduler { fn timesteps(&self) -> &[usize]; fn add_noise(&self, original: &Tensor, noise: Tensor, timestep: usize) -> Result<Tensor>; fn init_noise_sigma(&self) -> f64; fn scale_model_input(&self, sample: Tensor, _timestep: usize) -> Result<Tensor>; fn step(&mut self, model_output: &Tensor, timestep: usize, sample: &Tensor) -> Result<Tensor>; } /// This represents how beta ranges from its minimum value to the maximum /// during training. #[derive(Debug, Clone, Copy)] pub enum BetaSchedule { /// Linear interpolation. Linear, /// Linear interpolation of the square root of beta. ScaledLinear, /// Glide cosine schedule SquaredcosCapV2, } #[derive(Debug, Clone, Copy)] pub enum PredictionType { Epsilon, VPrediction, Sample, } /// Time step spacing for the diffusion process. /// /// "linspace", "leading", "trailing" corresponds to annotation of Table 2. of the [paper](https://arxiv.org/abs/2305.08891) #[derive(Debug, Clone, Copy)] pub enum TimestepSpacing { Leading, Linspace, Trailing, } impl Default for TimestepSpacing { fn default() -> Self { Self::Leading } } /// Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of /// `(1-beta)` over time from `t = [0,1]`. /// /// Contains a function `alpha_bar` that takes an argument `t` and transforms it to the cumulative product of `(1-beta)` /// up to that part of the diffusion process. 
pub(crate) fn betas_for_alpha_bar(num_diffusion_timesteps: usize, max_beta: f64) -> Result<Tensor> {
    // Squared-cosine alpha_bar (Nichol & Dhariwal, "Improved DDPM"); `t` is
    // the normalized position in [0, 1] along the diffusion process.
    let alpha_bar =
        |t: f64| f64::cos((t + 0.008) / 1.008 * std::f64::consts::FRAC_PI_2).powi(2);
    let mut betas = Vec::with_capacity(num_diffusion_timesteps);
    for i in 0..num_diffusion_timesteps {
        // Bug fix: the previous code divided two `usize` values, so `t1` was
        // always 0 and `t2` was 0 on every step except the last — collapsing
        // the whole schedule to [0, 0, ..., max_beta]. Use floating-point
        // division so t1/t2 sweep [0, 1] as in the reference implementation
        // (diffusers `betas_for_alpha_bar`, squaredcos_cap_v2).
        let t1 = i as f64 / num_diffusion_timesteps as f64;
        let t2 = (i + 1) as f64 / num_diffusion_timesteps as f64;
        // beta_t = 1 - alpha_bar(t2) / alpha_bar(t1), clipped at `max_beta`.
        betas.push((1.0 - alpha_bar(t2) / alpha_bar(t1)).min(max_beta));
    }
    let betas_len = betas.len();
    // Schedules are tiny; building them on the CPU is fine.
    Tensor::from_vec(betas, betas_len, &candle::Device::Cpu)
}
candle/candle-transformers/src/models/stable_diffusion/schedulers.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/schedulers.rs", "repo_id": "candle", "token_count": 940 }
63
/// Text-generation pipeline utilities.
pub mod text_generation;
candle/candle-transformers/src/pipelines/mod.rs/0
{ "file_path": "candle/candle-transformers/src/pipelines/mod.rs", "repo_id": "candle", "token_count": 7 }
64
## Running [BLIP Image Captioning](https://huggingface.co/Salesforce/blip-image-captioning-large) Example ### Vanilla JS and WebWorkers To build and test the UI made in Vanilla JS and WebWorkers, first we need to build the WASM library: ```bash sh build-lib.sh ``` This will bundle the library under `./build` and we can import it inside our WebWorker like a normal JS module: ```js import init, { Model } from "./build/m.js"; ``` The full example can be found under `./index.html`. All needed assets are fetched from the web, so no need to download anything. Finally, you can preview the example by running a local HTTP server. For example: ```bash python -m http.server ``` Then open `http://localhost:8000/index.html` in your browser.
candle/candle-wasm-examples/blip/README.md/0
{ "file_path": "candle/candle-wasm-examples/blip/README.md", "repo_id": "candle", "token_count": 220 }
65